diff --git "a/3162.jsonl" "b/3162.jsonl" new file mode 100644--- /dev/null +++ "b/3162.jsonl" @@ -0,0 +1,753 @@ +{"seq_id":"539293490","text":"import pygame\nfrom pysmile.renderer import Renderer\nfrom pysmile.renderers.tile_renderer import TileRenderer\nfrom .tilemap import TileMap\nfrom OpenGL.GL import *\n\n\nclass TileMapRenderer(Renderer):\n def __init__(self, size=None):\n self.size = size\n self.tm_hash = None\n self.dl = None\n\n def render(self, entity, rect):\n tmap = entity.get_component(TileMap)\n if map is None:\n return\n tm = tmap.tile_map\n if self.tm_hash != str(tm) or self.dl is None:\n self.tm_hash = str(tm)\n self.dl = glGenLists(1)\n glNewList(self.dl, GL_COMPILE)\n glEnable(GL_TEXTURE_2D)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)\n\n for row in tm:\n for tiles in row:\n for tile in tiles:\n glTranslate(tile.rect.x, tile.rect.y, 0)\n tile.texture.render(tmap.tileset, (tile.rect.width, tile.rect.height))\n glTranslate(-tile.rect.x, -tile.rect.y, 0)\n glEndList()\n glCallList(self.dl)\n\n\n\n","sub_path":"pysmile/tilemap/tilemap_renderer.py","file_name":"tilemap_renderer.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"94077561","text":"import requests\nimport logging\nfrom datetime import datetime as dt\n#from Util import Response\n\nlog = logging.getLogger()\nlog.setLevel(logging.INFO)\n\napi_url = 'https://7teqpb3w39.execute-api.us-east-1.amazonaws.com/dev/'\n\ndef cognitoUserToRDS(event, context):\n # Create RDS Entry\n cognitoRdsUserEndpoint = 'cognitoUserToRDS'\n rds_url = api_url + cognitoRdsUserEndpoint\n datestamp = dt.now().strftime('%Y-%m-%dT%H:%M:%S')\n payload = {\"email\": event[\"request\"][\"userAttributes\"][\"email\"],\n \"email_verified\": event[\"request\"][\"userAttributes\"][\"email_verified\"],\n \"datestamp\": datestamp,\n \"userPoolId\":event[\"userPoolId\"],\n \"userName\": event[\"userName\"]}\n r = requests.post(url=rds_url, data=payload)\n\n cognitoS3UserEndpoint = 'createCognitoUserKey'\n s3_url = api_url + cognitoS3UserEndpoint\n r = requests.post(url=s3_url, data=payload)\n\n return event\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162627778","text":"from gpiozero import AngularServo\nimport time\n\nprint(\"Initialising Servos...\")\n\ngpio = [4, 17, 18, 27, 22, 23, 24, 25]\nservos = []\niteration = 0\nfor x in gpio:\n servos.append(AngularServo(gpio[iteration], min_angle=-45, max_angle=45))\n iteration += 1\n\nwhile True:\n servos[0].mid()\n servos[1].mid()\n servos[2].mid()\n servos[3].mid()\n servos[4].max()\n servos[5].min()\n servos[6].max()\n servos[7].min()\n time.sleep(1)","sub_path":"standby.py","file_name":"standby.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153424862","text":"import json\n\nfrom rest_framework.test import APIRequestFactory\n\nfactory = APIRequestFactory()\nrequest = factory.post('/food-menu-items', json.dumps({\n \"food_item_name\": \"Snacks\",\n \"food_item_description\": \"Small bites\",\n \"food_item_price\": \"5.00\"\n}), 
content_type='application/json')","sub_path":"api_service/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"262998623","text":"import glob\nimport os\n\nfrom graphql.execution.base import ResolveInfo\n\nfrom dagster import check\nfrom dagster.core.definitions.schedule import ScheduleExecutionContext\nfrom dagster.core.errors import ScheduleExecutionError, user_code_error_boundary\nfrom dagster.core.storage.tags import check_tags\nfrom dagster.utils import merge_dicts\n\nfrom .utils import ExecutionMetadata, ExecutionParams, UserFacingGraphQLError, capture_dauphin_error\n\n\n@capture_dauphin_error\ndef get_scheduler_or_error(graphene_info):\n repository = graphene_info.context.get_repository()\n instance = graphene_info.context.instance\n if not instance.scheduler:\n raise UserFacingGraphQLError(graphene_info.schema.type_named('SchedulerNotDefinedError')())\n\n runningSchedules = [\n graphene_info.schema.type_named('RunningSchedule')(graphene_info, schedule=s)\n for s in instance.all_schedules(repository)\n ]\n\n return graphene_info.schema.type_named('Scheduler')(runningSchedules=runningSchedules)\n\n\n@capture_dauphin_error\ndef get_schedule_or_error(graphene_info, schedule_name):\n repository = graphene_info.context.get_repository()\n instance = graphene_info.context.instance\n schedule = instance.get_schedule_by_name(repository, schedule_name)\n\n if not schedule:\n raise UserFacingGraphQLError(\n graphene_info.schema.type_named('ScheduleNotFoundError')(schedule_name=schedule_name)\n )\n\n return graphene_info.schema.type_named('RunningSchedule')(graphene_info, schedule=schedule)\n\n\ndef execution_params_for_schedule(graphene_info, schedule_def, pipeline_def):\n schedule_context = ScheduleExecutionContext(\n graphene_info.context.instance, graphene_info.context.get_repository()\n )\n\n # Get environment_dict\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: 'Error occurred during the execution of environment_dict_fn for schedule '\n '{schedule_name}'.format(schedule_name=schedule_def.name),\n ):\n environment_dict = schedule_def.get_environment_dict(schedule_context)\n\n # Get tags\n with user_code_error_boundary(\n ScheduleExecutionError,\n lambda: 'Error occurred during the execution of tags_fn for schedule '\n '{schedule_name}'.format(schedule_name=schedule_def.name),\n ):\n schedule_tags = schedule_def.get_tags(schedule_context)\n\n pipeline_tags = pipeline_def.tags or {}\n check_tags(pipeline_tags, 'pipeline_tags')\n tags = merge_dicts(pipeline_tags, schedule_tags)\n\n selector = schedule_def.selector\n mode = schedule_def.mode\n\n return ExecutionParams(\n selector=selector,\n environment_dict=environment_dict,\n mode=mode,\n execution_metadata=ExecutionMetadata(tags=tags, run_id=None),\n step_keys=None,\n previous_run_id=None,\n )\n\n\ndef get_scheduler_handle(graphene_info):\n scheduler_handle = graphene_info.context.scheduler_handle\n if not scheduler_handle:\n raise UserFacingGraphQLError(graphene_info.schema.type_named('SchedulerNotDefinedError')())\n\n return scheduler_handle\n\n\ndef get_dagster_schedule_def(graphene_info, schedule_name):\n check.inst_param(graphene_info, 'graphene_info', ResolveInfo)\n check.str_param(schedule_name, 'schedule_name')\n\n scheduler_handle = get_scheduler_handle(graphene_info)\n schedule_definition = scheduler_handle.get_schedule_def_by_name(schedule_name)\n if not 
+{"seq_id":"530912122","text":"cont = totalced = 0\nced = 50\nprint('= ' * 15)\nprint(f'{\"MHILOCAS BANK\":^30}')\nprint('= ' * 15)\nvalor = int(input('Digite o valor para saque: '))\nprint('- ' * 5)\nwhile True:\n    while valor >= ced:\n        valor -= ced\n        totalced += 1\n    if valor < ced:\n        if totalced != 0:\n            print(f'{totalced} notas de R$ {ced}')\n        if ced == 50:\n            ced = 20\n        elif ced == 20:\n            ced = 10\n        elif ced == 10:\n            ced = 1\n        totalced = 0\n        if valor == 0:\n            break\nprint('- ' * 5)\nprint('OBRIGADA, VOLTE SEMPRE')","sub_path":"Revisao Estruturas de Controle/ex071.py","file_name":"ex071.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"404327753","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport string\nimport random\nimport pymongo\nimport bson\nimport rsa\nfrom copy import deepcopy\n\nfrom tastypie.http import HttpUnauthorized, HttpForbidden, HttpNotFound, HttpNoContent\nfrom tastypie.resources import Resource\nfrom tastypie.authorization import Authorization\nfrom tastypie.authentication import Authentication\nfrom tastypie.exceptions import BadRequest\nfrom tastypie.validation import Validation\nfrom tastypie.bundle import Bundle\nfrom tastypie.utils import trailing_slash\nfrom tastypie import fields\nfrom django.conf.urls import url\nfrom django.conf import settings\n\nfrom beMerchant.settings import mongo, db\nfrom bson.objectid import ObjectId\n\nclass dictobj(object):\n    def __init__(self, data=None):\n        self.__dict__['data'] = data\n\n    def __getattr__(self, key):\n        value = self.__dict__['data'][key]\n        if type(value) == type({}):\n            return dictobj(value)\n\n    def __getitem__(self, key):\n        try:\n            value = self.__dict__['data'][key]\n            return value\n        except:\n            return None\n\n    def __iter__(self):\n        return self.__dict__['data'].iteritems()\n\n    def next(self):\n        if not self:\n            raise StopIteration\n        return dict(self)\n\n    def __hash__(self):\n        return hash(self.__dict__['data'].iteritems())\n\n    def __eq__(self, other):\n        return (self.name, self.location) == (other.name, other.location)\n\nclass SimpleAuthentication(Authentication):\n    def is_authenticated(self, request, **kwargs):\n        if(request.META.get('HTTP_COOKIE')):\n            http_cookie = [x.strip() for x in request.META.get('HTTP_COOKIE').split(\";\")]\n            logged_user = [x.split('=')[1] for x in http_cookie if 'SYSTEMCOOKIE' in x ][0]\n            if request.session.get(logged_user, False):\n                return True\n        return False\n\nclass BasicResource( Resource ):\n    class Meta:\n        object_class = dictobj\n        authentication = SimpleAuthentication()\n        authorization = Authorization()\n        always_return_data = True\n        limit = 0\n        max_limit = 0\n\n    def __init__(self, api_name=None):\n        super(Resource).__init__(Resource)\n        self.collection_name = self._meta.mongo_collection or self._meta.resource_name\n        self.collection = db[self.collection_name]\n        self.fields = deepcopy(self.base_fields)\n\n    def build_schema(self):\n        base_schema = super(BasicResource, self).build_schema()\n        base_schema['fields']['_id'] = {\n            \"blank\": False,\n            \"default\": \"No default provided.\",\n            \"help_text\": \"Unicode string data. Ex: Hello World\",\n            \"nullable\": False,\n            \"readonly\": True,\n            \"type\": \"hidden\",\n            \"unique\": False,\n            \"label\": \"No default provided.\"\n        }\n        if hasattr(self._meta,'field_order'):\n            base_schema['field_order'] = self._meta.field_order\n        if hasattr(self._meta,'fields'):\n            for f in self._meta.fields:\n                base_schema['fields'][f['key']] = {\n                    \"blank\": f['blank'] if 'blank' in f.keys() else False,\n                    \"default\": f['default'] if 'default' in f.keys() else \"No default provided.\",\n                    \"help_text\": f['help_text'] if 'help_text' in f.keys() else \"Unicode string data. Ex: Hello World\",\n                    \"nullable\": f['nullable'] if 'nullable' in f.keys() else False,\n                    \"readonly\": f['readonly'] if 'readonly' in f.keys() else True,\n                    \"type\": f['type'] if 'type' in f.keys() else \"hidden\",\n                    \"widget\": f['widget'] if 'widget' in f.keys() else \"hidden\",\n                    \"unique\": f['unique'] if 'unique' in f.keys() else False,\n                    \"label\": f['label'] if 'label' in f.keys() else \"No default provided.\"\n                }\n        base_schema['fields']['resource_uri'].update({\n            \"type\": \"hidden\",\n        })\n        return base_schema\n\n    def get_object_list( self, request ):\n        return [dictobj(obj) for obj in self.collection.find(request.GET)]\n\n    def obj_get_list(self, bundle, **kwargs):\n        return self.get_object_list(bundle.request)\n\n    def obj_get( self, request=None, **kwargs ):\n        return dictobj(self.collection.find_one( {'_id':ObjectId(kwargs.get('pk'))} ))\n\n    def obj_update( self, bundle, request=None, **kwargs ):\n        return self.obj_create( bundle, request, **kwargs )\n\n    def obj_create( self, bundle, request, **kwargs):\n        if '_id' in bundle.data:\n            # update_one takes the filter and the update document as separate arguments\n            self.collection.update_one({'_id': ObjectId( bundle.data.get('_id') )}, {\"$set\": bundle.data})\n        else:\n            self.collection.insert_one(bundle.data)\n        bundle.obj = dictobj(bundle.data)\n        return bundle\n\n    def obj_delete_list(self, bundle, **kwargs):\n        #TODO\n        print(\"delete\")\n\n    def obj_delete(self, bundle, **kwargs):\n        deleted = self.collection.delete_one({ '_id': ObjectId(kwargs['pk']) })\n        if(deleted.deleted_count > 0):\n            return {\"success\":True}\n        else:\n            return {\"success\":False}\n\n    def detail_uri_kwargs(self, bundle_or_obj):\n        kwargs = {}\n        if isinstance(bundle_or_obj, Bundle):\n            kwargs['pk'] = bundle_or_obj.obj['_id']\n        else:\n            kwargs['pk'] = bundle_or_obj.obj['_id']\n        return kwargs\n\n    def delete_detail(self, request, **kwargs):\n        bundle = Bundle(request=request)\n        try:\n            obj = self.obj_delete(bundle=bundle, **self.remove_api_resource_names(kwargs))\n            return self.create_response(request, obj)\n        except Exception as e:\n            print(e)\n            return self.create_response(request, {\n                'success': False\n            }, HttpForbidden )\n\n    def rollback( self, bundles ):\n        pass\n\n    def dehydrate( self, bundle ):\n        if isinstance(bundle.obj, dictobj):\n            for x,y in bundle.obj:\n                bundle.data[x] = y\n        return bundle\n","sub_path":"TastypieMongoAPI.py","file_name":"TastypieMongoAPI.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"644834022","text":"# In order to implement a binary search our data has to be sorted first.\n# Algorithmic complexity: O(log n)\nimport random\n\ndef binary_search(list, start_index, end_index, objetive):\n\n    if start_index > end_index:\n        return False\n\n    middle = (start_index + end_index) // 2\n\n    if list[middle] == objetive:\n        return True\n    elif list[middle] < objetive:\n        return binary_search(list, middle + 1, end_index, objetive)\n    else:\n        return binary_search(list, start_index, middle - 1, objetive)\n\n\nif __name__ == '__main__':\n    list_size = int(input('what will be the size of your lis? '))\n    objetive = int(input('what number do you want to search?? '))\n    list = sorted([random.randint(0, 50) for i in range(list_size)])\n\n    found = binary_search(list, 0, len(list) - 1, objetive)\n    print(list)\n    print('The item {} {} the list'.format(\n        objetive, \"is in\" if found else \"is not in\"))\n","sub_path":"Search/Python/binary-search.py","file_name":"binary-search.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"340318264","text":"\"\"\"\nAvailable controls:\n\n*depends_on* implements a relationship that guarantees that the dependent\naction on a resource will be executed after the parent, if the parent is\nexecuted. It means that this control contributes only to ordering, and won't\ntrigger any action if the dependent resource isn't changed.\n\n  depends_on:\n    - parent:run -> ok -> dependent:run\n\n*react_on* - a relationship that guarantees that the action on the dependent\nresource will be executed if the parent action is going to be executed. This\ncontrol will trigger the action even if no changes are noticed on the\ndependent resource.\n\n  react_on:\n    - parent:update -> ok -> dependent:update\n\"\"\"\n\nimport re\n\n\nclass Event(object):\n\n    etype = None\n\n    def __init__(self, parent_node, parent_action,\n                 state='', depend_node='', depend_action=''):\n        self.parent_node = parent_node\n        self.parent_action = parent_action\n        self.state = state\n        self.depend_node = depend_node\n        self.depend_action = depend_action\n\n    @property\n    def parent(self):\n        return '{}.{}'.format(self.parent_node, self.parent_action)\n\n    @property\n    def dependent(self):\n        return '{}.{}'.format(self.depend_node, self.depend_action)\n\n    def to_dict(self):\n        rst = {'etype': self.etype}\n        rst.update(self.__dict__)\n        return rst\n\n    def __eq__(self, inst):\n        if inst.__class__ != self.__class__:\n            return False\n        return all((\n            self.parent == inst.parent,\n            self.state == inst.state,\n            self.dependent == inst.dependent))\n\n    def __repr__(self):\n        return '{}: {} -> {} -> {}'.format(\n            self.etype, self.parent, self.state, self.dependent)\n\n\nclass Dependency(Event):\n\n    etype = 'depends_on'\n\n    def insert(self, changed_resources, changes_graph):\n        if (self.parent in changes_graph and\n                self.dependent in changes_graph):\n            changes_graph.add_edge(\n                self.parent, self.dependent, state=self.state)\n\nDep = Dependency\n\nclass React(Event):\n\n    etype = 'react_on'\n\n    def insert(self, changed_resources, changes_graph):\n\n        if self.parent in changes_graph:\n            if self.dependent not in changes_graph:\n                changes_graph.add_node(\n                    self.dependent, status='PENDING',\n                    errmsg=None, type='solar_resource',\n                    args=[self.depend_node, self.depend_action])\n\n            changes_graph.add_edge(self.parent, self.dependent, state=self.state)\n            changed_resources.append(self.depend_node)\n\n\nclass StateChange(Event):\n\n    etype = 'state_change'\n\n    def insert(self, changed_resources, changes_graph):\n        changed_resources.append(self.parent)\n        changes_graph.add_node(\n            self.parent, status='PENDING',\n            errmsg=None, type='solar_resource',\n            args=[self.parent_node, self.parent_action])\n","sub_path":"solar/solar/events/controls.py","file_name":"controls.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"426697940","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nimport datetime\nnow = datetime.datetime.now()\nprint(\"+++++++++++++++ \",now,\" ++++++++++++++++\")\n\n# Yahoo Finance\ndef YahooScrape(code):\n    if code > 9999:\n        url = \"https://stocks.finance.yahoo.co.jp/stocks/detail/?code=\"+str(code)+\".O\"\n    else:\n        url = \"https://stocks.finance.yahoo.co.jp/stocks/detail/?code=\"+str(code)+\".T\"\n    html = urlopen(url)\n    bsObj = BeautifulSoup(html, \"html.parser\")\n    row1 = bsObj.findAll(\"th\",{\"class\":\"symbol\"})[0]\n    row2 = bsObj.findAll(\"td\",{\"class\":\"stoksPrice\"})[1]\n    try:\n        row3 = bsObj.findAll(\"span\",{\"class\":\"icoUpGreen yjMSt\"})[0]\n    except:\n        try:\n            row3 = bsObj.findAll(\"span\",{\"class\":\"icoNoChange yjMSt\"})[0]\n        except:\n            row3 = bsObj.findAll(\"span\",{\"class\":\"icoDownRed yjMSt\"})[0]\n    if code > 9999:\n        srt = \"<< \"+row1.get_text()+str(code)+\" >>\"\n        print(srt.ljust(30),\"\\t\",row2.get_text(),\" -- \",row3.get_text())\n    else:\n        row4 = \"{:,}\".format(round(int(bsObj.findAll(\"dl\",{\"class\":\"tseDtl\"},\"dd\")[3].find(\"strong\").get_text().replace(\",\",\"\"))/100))\n        print(\"<< \",row1.get_text(),code,\" >>\\n\",row2.get_text(),\" -- \",row3.get_text(),\"-- 時価\",row4)\n    return row2.get_text()\n\n#+++ USD/JPY\nhtml = urlopen(\"https://stocks.finance.yahoo.co.jp/stocks/detail/?code=usdjpy=x\")\nbsObj = BeautifulSoup(html, \"html.parser\")\nrow1 = bsObj.findAll(\"th\",{\"class\":\"symbol\"})[0]\nrow2 = bsObj.findAll(\"td\",{\"class\":\"stoksPrice\"})[0]\nprint(\"<< \",row1.get_text(),\" >>\\t\",row2.get_text())\n\n#+++ Nikkei 225 futures\nYahooScrape(5040469)\n\n#+++ Nikkei 225 index\nYahooScrape(998407)\n\n#+++ 1570 leveraged Nikkei ETF\na=int(YahooScrape(1570).replace(\",\",\"\").replace(\"---\",\"0\"))\n\n#+++ 5484 Tohoku Special Steel\na=int(YahooScrape(5484).replace(\",\",\"\").replace(\"---\",\"0\"))\nhoge1=(a-1959)*200\nprint(\"----\",hoge1)\n\n#+++ 7177 GMOFHD\na=int(YahooScrape(7177).replace(\",\",\"\").replace(\"---\",\"0\"))\nhoge2=(a-755)*100\nprint(\"----\",hoge2)\n\n#+++ 2654 Asmo\na=int(YahooScrape(2654).replace(\",\",\"\").replace(\"---\",\"0\"))\nhoge3=(a-666)*100\nprint(\"----\",hoge3)\n\n#+++ 3034 Qol\n#YahooScrape(3034)\n\n#+++ 2146 UT-GROUP 9366 Sanritsu\nYahooScrape(6785)\n\n#+++ 9449 GMO\nYahooScrape(9449)\n\n#+++ 4755 Rakuten\nYahooScrape(4755).replace(\",\",\"\").replace(\"---\",\"0\").replace(\",\",\"\").replace(\"---\",\"0\")\n\n#+++ 6785 Suzuki 8267 AEON 2146 T-GROUP 2154 Trust Tech\nYahooScrape(8267).replace(\",\",\"\").replace(\"---\",\"0\").replace(\",\",\"\").replace(\"---\",\"0\")\n\n#+++ 4977 Nitta Gelatin 4102 Maruo Calcium\na=int(YahooScrape(4977).replace(\",\",\"\").replace(\"---\",\"0\").replace(\",\",\"\").replace(\"---\",\"0\"))\ngeho1=(a-787)*300\nprint(\"----\",geho1)\n\n#+++ 6857 Advantest\na=int(YahooScrape(6857).replace(\",\",\"\").replace(\"---\",\"0\").replace(\",\",\"\").replace(\"---\",\"0\"))\ngeho2=(2276-a)*200\nprint(\"----\",geho2)\n\ntmp=geho1+geho2\ntmp2=hoge1+hoge2+hoge3\ntmp3=tmp+tmp2\nprint(\"+++++++++ 信:\",tmp,\" 現:\",tmp2,\" 総:\",tmp3)\n\n# kabutan\nprint(\"------------------------------------------------------------\")\nhtml = urlopen(\"https://kabutan.jp/news/\")\nbsObj = BeautifulSoup(html,\"html.parser\")\ntable = bsObj.findAll(\"table\",{\"class\":\"s_news_list\"},\"tr\")\nrow_time = table[0].findAll(\"td\",{\"class\":\"news_time\"})\nrows = table[0].findAll({\"a\":\"href\"})\n\nfor x in range(10):\n    name = rows[x].get_text().split(\"、\")[0]\n    article = rows[x].get_text().split(\"、\")[1]\n    print(row_time[x].get_text(),name.ljust(10),\"\\t\",article)\n\n# Yahoo News\nprint(\"------------------------------------------------------------\")\nhtml = urlopen(\"https://news.yahoo.co.jp/flash\")\nbsObj = BeautifulSoup(html,\"html.parser\")\ntable = bsObj.findAll(\"ul\",{\"class\":\"listBd\"},{\"class\":\"ttl\"})\nrows = table[0].findAll(\"li\")\n\nprint(\"<< Yahoo!News -- 速報 >>\")\nfor x in range(10):\n    row = rows[x].findAll({\"a\":\"href\"})[-1]\n    print(row.get_text().replace('\\n',''))\n\n\n# Sankei News\nprint(\"------------------------------------------------------------\")\ntry:\n    html = urlopen(\"http://www.sankei.com/flash/newslist/flash-n1.html\")\n    bsObj = BeautifulSoup(html,\"html.parser\")\n    table = bsObj.findAll(\"section\",{\"class\":\"indexText clearfix\"})\n    rows = table[0].findAll(\"li\")\n\n    print(\"<< 産経ニュース -- 速報 >>\")\n    for x in range(10):\n        row = rows[x].findAll({\"a\":\"href\"})[-1]\n        print(row.get_text())\n\nexcept:\n    print(\"産経 Error\")\n# 2nn News\nprint(\"------------------------------------------------------------\")\ntry:\n    html = urlopen(\"https://www.2nn.jp/newsplus/\")\n    bsObj = BeautifulSoup(html,\"html.parser\")\n    table = bsObj.findAll(\"li\",{\"data-twitter\":\"2nn_newsplus\"})\n\n    print(\"<< ニュース速報 >>\")\n    for x in range(10):\n        row = table[x].find({\"a\":\"class\"})\n        print(row.get_text())\nexcept:\n    print(\"2nnNews Error\")\n\n\n# Mainichi Shimbun market news\nprint(\"------------------------------------------------------------\")\ntry:\n    html = urlopen(\"https://mainichi.jp/stock/\")\n    bsObj = BeautifulSoup(html,\"html.parser\")\n    table = bsObj.findAll(\"ul\",{\"class\":\"list-typeD\"},\"span\")\n    rows = table[0].findAll(\"span\",{\"class\":\"midashi\"})\n    print(\"<< 毎日新聞 >>\")\n    for x in range(5):\n        print(rows[x].get_text())\nexcept:\n    print(\"毎日新聞 Error\")\n\n\n# NikkeiScrape\ndef NikkeiScrape(url):\n    html = urlopen(\"https://www.nikkei.com/news/category/\"+url+\"/\")\n    bsObj = BeautifulSoup(html, \"html.parser\")\n    table = bsObj.findAll(\"h4\",{\"class\":\"cmn-article_title\"})\n    table = bsObj.findAll(\"h3\",{\"class\":\"m-miM09_title\"})\n    for x in range(10):\n        print(table[x].get_text().replace(\"\\n\",\"\"))\n\n#+++ Nikkei Shimbun: stocks & finance\nprint(\"------------------------------------------------------------\")\ntry:\n    print(\"<< 日経新聞 株・金融>>\")\n    NikkeiScrape(\"markets\")\nexcept:\n    print(\"日経 株・金融 Error\")\n#+++ Nikkei Shimbun: economy\nprint(\"------------------------------------------------------------\")\ntry:\n    print(\"<< 日経新聞 経済 >>\")\n    NikkeiScrape(\"economy\")\nexcept:\n    print(\"日経 経済 Error\")\n#+++ Nikkei Shimbun: politics\nprint(\"------------------------------------------------------------\")\ntry:\n    print(\"<< 日経新聞 政治 >>\")\n    NikkeiScrape(\"politics\")\nexcept:\n    print(\"日経 政治 Error\")\nprint(\"------------------------------------------------------------\")\n","sub_path":"news_scrape.py","file_name":"news_scrape.py","file_ext":"py","file_size_in_byte":6322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"621813636","text":"# Brady Robinson\n\n\"\"\"\nCOMS W4705 - Natural Language Processing - Summer 19\nHomework 2 - Parsing with Context Free Grammars\nDaniel Bauer\n\"\"\"\n\nimport sys\nfrom collections import defaultdict\nfrom math import fsum\n\nclass Pcfg(object):\n    \"\"\"\n    Represent a probabilistic context free grammar.\n    \"\"\"\n\n    def __init__(self, grammar_file):\n        self.rhs_to_rules = defaultdict(list)\n        self.lhs_to_rules = defaultdict(list)\n        self.startsymbol = None\n        self.read_rules(grammar_file)\n        self.print_whether_valid()\n\n    def read_rules(self,grammar_file):\n        for line in grammar_file:\n            line = line.strip()\n            if line and not line.startswith(\"#\"):\n                if \"->\" in line:\n                    rule = self.parse_rule(line.strip())\n                    lhs, rhs, prob = rule\n                    self.rhs_to_rules[rhs].append(rule)\n                    self.lhs_to_rules[lhs].append(rule)\n                else:\n                    startsymbol, prob = line.rsplit(\";\")\n                    self.startsymbol = startsymbol.strip()\n\n    def parse_rule(self,rule_s):\n        lhs, other = rule_s.split(\"->\")\n        lhs = lhs.strip()\n        rhs_s, prob_s = other.rsplit(\";\",1)\n        prob = float(prob_s)\n        rhs = tuple(rhs_s.strip().split())\n        return (lhs, rhs, prob)\n\n    def verify_grammar(self):\n        \"\"\"\n        Return True if the grammar is a valid PCFG in CNF.\n        Otherwise return False.\n        \"\"\"\n        # TODO, Part 1\n        terminal_symbols = []\n        non_terminal_symbols = []\n        probability_list = []\n        probability_total = 0\n\n        for value in self.lhs_to_rules.values():\n            if value[0][0] == value[0][0].upper() and len(value[0][0].split()) == 1:\n                non_terminal_symbols.append(value[0][0])\n            else:\n                return False\n\n        for value in self.lhs_to_rules.values():\n            for element in value:\n                for symbol in element[1]:\n                    if symbol == symbol.lower():\n                        terminal_symbols.append(symbol)\n                    else:\n                        non_terminal_symbols.append(symbol)\n\n        for value in self.lhs_to_rules.values():\n            for element in value:\n                if len(element[1]) > 2 or len(element[1]) < 1:\n                    return False\n                elif len(element[1]) == 1 and (element[1][0] in non_terminal_symbols):\n                    return False\n                elif len(element[1]) == 2 and (element[1][0] == element[1][0].lower() or element[1][1] == element[1][1].lower()):\n                    return False\n\n        for key in self.lhs_to_rules:\n            if len(self.lhs_to_rules[key]) == 1:\n                if self.lhs_to_rules[key][0][2] != 1.0:\n                    return False\n\n        for value in self.lhs_to_rules.values():\n            if probability_total > 0:\n                probability_list.append(probability_total)\n            probability_total = 0\n            current_key = value[0][0]\n            for element in value:\n                if element[0] == current_key:\n                    probability_total = probability_total + element[2]\n        # include the final lhs group's total as well\n        if probability_total > 0:\n            probability_list.append(probability_total)\n\n        for probability in probability_list:\n            if probability > 1.0001 or probability < 0.9999:\n                return False\n\n        return True\n\n    def print_whether_valid(self):\n        if self.verify_grammar():\n            print(\"-> This grammar is valid according to Chomsky normal form!\")\n        else:\n            print(\"-> Error. This grammar is invalid according to Chomsky normal form!\")\n\n\nif __name__ == \"__main__\":\n    with open(sys.argv[1],'r') as grammar_file:\n        grammar = Pcfg(grammar_file)\n        # grammar\n        # print(grammar.lhs_to_rules['NP'])\n        # grammar.verify_grammar()\n\n","sub_path":"cfg_parser/grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"471950389","text":"#################################################\n# Hw2\n#################################################\n\nfrom cs112_f16_wk2 import assertEqual, assertAlmostEqual, lintAll, testAll\nimport math\n\n#################################################\n# Helper functions\n#################################################\n\ndef almostEqual(d1, d2, epsilon=10**-7):\n    # note: use math.isclose() outside 15-112 with Python version 3.5 or later\n    return (abs(d2 - d1) < epsilon)\n\nimport decimal\ndef roundHalfUp(d):\n    # Round to nearest with ties going away from zero.\n    rounding = decimal.ROUND_HALF_UP\n    # See other rounding options here:\n    # https://docs.python.org/3/library/decimal.html#rounding-modes\n    return int(decimal.Decimal(d).to_integral_value(rounding=rounding))\n\n#################################################\n# Problems\n#################################################\n\ndef isPrime(n):\n    if (n == 2):\n        return True\n    if (n < 2 or n % 2 == 0):\n        return False\n    max = roundHalfUp(n**0.5)\n    for i in range(3,max+1,2):\n        if (n % i == 0):\n            return False\n    return True\n\ndef nthPrime(n):\n    counter = -1\n    number = 1\n    while (counter < n):\n        number += 1\n        if (isPrime(number)):\n            counter += 1\n    return number\n\ndef getKthDigit(n, k):\n    n = abs(n)\n    return (n // (10**k) % 10)\n\ndef setKthDigit(n, k, d):\n    if (n < 0):\n        n = abs(n)\n        n = n - getKthDigit(n, k) * 10**k + (d * 10**k)\n        return -n\n    else:\n        return n - getKthDigit(n, k) * 10**k + (d * 10**k)\n\ndef digitCount(n):\n    digits = 1\n    n = abs(n)\n    while (n // 10 > 0):\n        digits += 1\n        n = n // 10\n    return digits\n\ndef sumOfDigits(n):\n    digits = digitCount(n)\n    sumdigits = 0\n    for x in range(0, digits):\n        sumdigits += n % 10\n        n = n // 10\n    return sumdigits\n\ndef sumOfSquaresOfDigits(n):\n    digits = digitCount(n)\n    sumdigits = 0\n    for x in range(0, digits):\n        sumdigits += (n % 10)**2\n        n = n // 10\n    return sumdigits\n\ndef isHappyNumber(n):\n    if (n < 1):\n        return False\n    lol = sumOfSquaresOfDigits(n)\n    while (lol != 4 and lol != 1):\n        lol = sumOfSquaresOfDigits(lol)\n    return lol == 1\n\ndef nthHappyNumber(n):\n    counter = -1\n    number = -1\n    while (counter < n):\n        number += 1\n        if (isHappyNumber(number)):\n            counter += 1\n    return number\n\ndef isHappyPrime(n):\n    return isHappyNumber(n) and isPrime(n)\n\ndef nthHappyPrime(n):\n    counter = -1\n    num = -1\n    while (counter < n):\n        num += 1\n        if (isHappyPrime(num)):\n            counter += 1\n    return num\n\ndef isKaprekarNumber(n):\n    square = n**2\n    digits = digitCount(square)\n    lmao = False\n    for x in range(0, digits+1):\n        if (square // 10**x + square % 10**x == n):\n            if (square % 10**x > 0):\n                lmao = True\n                break\n    return lmao\n\ndef nearestKaprekarNumber(n):\n    up = math.ceil(n)\n    down = math.floor(n)\n    while (not isKaprekarNumber(up) and not isKaprekarNumber(down)):\n        up += 1\n        down -= 1\n    if (isKaprekarNumber(up) and not isKaprekarNumber(down)):\n        return up\n    elif (isKaprekarNumber(up) and isKaprekarNumber(down)):\n        if (n - math.floor(n) <= 0.5):\n            return down\n        else:\n            return up\n    else:\n        return down\n\ndef carrylessAdd(x, y):\n    xdigits = digitCount(x)\n    ydigits = digitCount(y)\n    sumc = 0\n    if (xdigits < ydigits):\n        for i in range(ydigits):\n            sumc = setKthDigit(sumc, i,\n                               (getKthDigit(x, i) + getKthDigit(y, i)) % 10)\n    else:\n        for k in range(xdigits):\n            sumc = setKthDigit(sumc, k,\n                               (getKthDigit(x, k) + getKthDigit(y, k)) % 10)\n    return sumc\n\ndef carrylessMultiply(x, y):\n    xdigits = digitCount(x)\n    ydigits = digitCount(y)\n    summ = 0\n    total = 0\n    for i in range(ydigits):\n        summ = 0\n        for j in range(xdigits):\n            summ = setKthDigit(summ, j,\n                               (getKthDigit(x, j) * getKthDigit(y, i)) % 10)\n        total = carrylessAdd(total, summ * 10**i)\n    return total\n\ndef sumOfPrimeDigits(n):\n    sum = 0\n    prime = 0\n    while (n > 1):\n        lol = nthPrime(prime)\n        if (n % lol == 0):\n            n /= lol\n            sum += sumOfDigits(lol)\n        else:\n            prime += 1\n    return sum\n\ndef nthSmithNumber(n):\n    counter = -1\n    number = 3\n    while (counter < n):\n        number += 1\n        if (not isPrime(number) and\n                sumOfDigits(number) == sumOfPrimeDigits(number)):\n            counter += 1\n    return number\n\n###### BONUS #######\n\ndef isWeaklyPrime(n):\n    if (not isPrime(n)):\n        return False\n    digits = digitCount(n)\n    for i in range(digits):\n        for j in range(10):\n            if (setKthDigit(n, i, j) != n and isPrime(setKthDigit(n, i, j))):\n                return False\n    return True\n\ndef nthWeaklyPrime(n):\n    counter = -1\n    number = 294000\n    while (counter < n):\n        number += 1\n        if (isWeaklyPrime(number)):\n            counter += 1\n    return number\n\ndef makeBoard(moves):\n    board = 0\n    for i in range(moves):\n        board += (8 * 10**i)\n    return board\n\ndef kthDigit(n, k):\n    n = abs(n)\n    return (n // (10**k) % 10)\n\ndef replaceKthDigit(n, k, d):\n    if (n < 0):\n        n = abs(n)\n        n = n - kthDigit(n, k) * 10**k + (d * 10**k)\n        return -n\n    else:\n        return n - kthDigit(n, k) * 10**k + (d * 10**k)\n\ndef getLeftmostDigit(n):\n    return kthDigit(n, digitCount(n)-1)\n\ndef clearLeftmostDigit(n):\n    return replaceKthDigit(n, digitCount(n)-1, 0)\n\ndef isWin(board):\n    for i in range(digitCount(board)-2):\n        if (kthDigit(board, i) == 2 and kthDigit(board, i+1) == 1\n                and kthDigit(board, i+2) == 1):\n            return True\n    return False\n\ndef isFull(board):\n    for i in range(digitCount(board)):\n        if (kthDigit(board, i) == 8):\n            return False\n    return True\n\ndef play112(game):\n    sizeOfBoard = getLeftmostDigit(game)\n    board = makeBoard(sizeOfBoard)\n    game = clearLeftmostDigit(game)\n    player = 0\n    moves = digitCount(game) // 2\n    while (game > 0):\n        moves -= 1\n        player = 2 - (player + 1) % 2\n        position = getLeftmostDigit(game)\n        move = getLeftmostDigit(clearLeftmostDigit(game))\n        if (move > 2):\n            b = str(board)\n            p = str(player)\n            return b + \": Player \" + p + \": move must be 1 or 2!\"\n        elif (position > digitCount(board)):\n            return str(board) + \": Player \" + str(player) + \": offboard!\"\n        elif (kthDigit(board, digitCount(board)-position) != 8):\n            return str(board) + \": Player \" + str(player) + \": occupied!\"\n        else:\n            board = replaceKthDigit(board, digitCount(board)-position, move)\n            if (isWin(board)):\n                return str(board) + \": Player \" + str(player) + \" wins!\"\n            elif (isFull(board)):\n                return str(board) + \": Tie!\"\n            else:\n                game = game % 100**moves\n    return str(board) + \": Unfinished!\"\n\n#################################################\n# Test Functions\n#################################################\n\ndef testSumOfSquaresOfDigits():\n    print(\"Testing sumOfSquaresOfDigits()...\", end=\"\")\n    assertEqual(sumOfSquaresOfDigits(5), 25)\n    assertEqual(sumOfSquaresOfDigits(12), 5)\n    assertEqual(sumOfSquaresOfDigits(234), 29)\n    print(\"Passed all tests!\")\n\ndef testIsHappyNumber():\n    print(\"Testing isHappyNumber()...\", end=\"\")\n    assertEqual(isHappyNumber(-7), False)\n    assertEqual(isHappyNumber(1), True)\n    assertEqual(isHappyNumber(2), False)\n    assertEqual(isHappyNumber(97), True)\n    assertEqual(isHappyNumber(98), False)\n    assertEqual(isHappyNumber(404), True)\n    assertEqual(isHappyNumber(405), False)\n    print(\"Passed all tests!\")\n\ndef testNthHappyNumber():\n    print(\"Testing nthHappyNumber()...\", end=\"\")\n    assertEqual(nthHappyNumber(0), 1)\n    assertEqual(nthHappyNumber(1), 7)\n    assertEqual(nthHappyNumber(2), 10)\n    assertEqual(nthHappyNumber(3), 13)\n    assertEqual(nthHappyNumber(4), 19)\n    assertEqual(nthHappyNumber(5), 23)\n    assertEqual(nthHappyNumber(6), 28)\n    assertEqual(nthHappyNumber(7), 31)\n    print(\"Passed all tests!\")\n\ndef testIsHappyPrime():\n    print(\"Testing isHappyPrime()...\", end=\"\")\n    assertEqual(isHappyPrime(1), False)\n    assertEqual(isHappyPrime(2), False)\n    assertEqual(isHappyPrime(3), False)\n    assertEqual(isHappyPrime(7), True)\n    assertEqual(isHappyPrime(10), False)\n    assertEqual(isHappyNumber(13), True)\n    print(\"Passed all tests!\")\n\ndef testNthHappyPrime():\n    print(\"Testing nthHappyPrime...\", end=\"\")\n    assertEqual(nthHappyPrime(0), 7)\n    assertEqual(nthHappyPrime(1), 13)\n    assertEqual(nthHappyPrime(2), 19)\n    assertEqual(nthHappyPrime(3), 23)\n    assertEqual(nthHappyPrime(4), 31)\n    assertEqual(nthHappyPrime(10), 167)\n    assertEqual(nthHappyPrime(20), 397)\n    print(\"Passed all tests!\")\n\ndef testNearestKaprekarNumber():\n    print(\"Testing nearestKaprekarNumber()...\", end=\"\")\n    assertEqual(nearestKaprekarNumber(1), 1)\n    assertEqual(nearestKaprekarNumber(0), 1)\n    assertEqual(nearestKaprekarNumber(-1), 1)\n    assertEqual(nearestKaprekarNumber(-2), 1)\n    assertEqual(nearestKaprekarNumber(-12345), 1)\n    assertEqual(nearestKaprekarNumber(1.234), 1)\n    assertEqual(nearestKaprekarNumber(4.99999999), 1)\n    assertEqual(nearestKaprekarNumber(5), 1)\n    assertEqual(nearestKaprekarNumber(5.00000001), 9)\n    assertEqual(nearestKaprekarNumber(27), 9)\n    assertEqual(nearestKaprekarNumber(28), 45)\n    assertEqual(nearestKaprekarNumber(45), 45)\n    assertEqual(nearestKaprekarNumber(50), 45)\n    assertEqual(nearestKaprekarNumber(51), 55)\n    assertEqual(nearestKaprekarNumber(1611), 999)\n    assertEqual(nearestKaprekarNumber(1612), 2223)\n    assertEqual(nearestKaprekarNumber(2475.4), 2223)\n    assertEqual(nearestKaprekarNumber(2475.5), 2223)\n    assertEqual(nearestKaprekarNumber(2475.51), 2728)\n    assertEqual(nearestKaprekarNumber(2475.6), 2728)\n    #kaps = [1, 9, 45, 55, 99, 297, 703, 999, 2223, 2728]\n    #bigKaps = [994708, 999999]\n    assertEqual(nearestKaprekarNumber(995123), 994708)\n    assertEqual(nearestKaprekarNumber(9376543), 9372385)\n    assertEqual(nearestKaprekarNumber(13641234), 13641364)\n    print(\"Passed!\")\n\ndef testCarrylessMultiply():\n    print(\"Testing carrylessMultiply()...\", end=\"\")\n    assertEqual(carrylessMultiply(643, 59), 417)\n    assertEqual(carrylessMultiply(6412, 387), 807234)\n    print(\"Passed!\")\n\ndef testNthSmithNumber():\n    print('Testing nthSmithNumber()... ', end='')\n    assertEqual(nthSmithNumber(0), 4)\n    assertEqual(nthSmithNumber(1), 22)\n    assertEqual(nthSmithNumber(2), 27)\n    assertEqual(nthSmithNumber(3), 58)\n    assertEqual(nthSmithNumber(4), 85)\n    assertEqual(nthSmithNumber(5), 94)\n    assertEqual(nthSmithNumber(6), 121)\n    print('Passed.')\n\ndef testNthWeaklyPrime():\n    print(\"Testing carrylessMultiply()...\", end=\"\")\n    assertEqual(nthWeaklyPrime(0), 294001)\n    assertEqual(nthWeaklyPrime(1), 505447)\n    assertEqual(nthWeaklyPrime(2), 584141)\n    print(\"Passed!\")\n\ndef testPlay112():\n    print(\"Testing play112()...\", end=\"\")\n    assertEqual(play112( 5 ), \"88888: Unfinished!\")\n    assertEqual(play112( 521 ), \"81888: Unfinished!\")\n    assertEqual(play112( 52112 ), \"21888: Unfinished!\")\n    assertEqual(play112( 5211231 ), \"21188: Unfinished!\")\n    assertEqual(play112( 521123142 ), \"21128: Player 2 wins!\")\n    assertEqual(play112( 521123151 ), \"21181: Unfinished!\")\n    assertEqual(play112( 52112315142 ), \"21121: Player 1 wins!\")\n    assertEqual(play112( 523 ), \"88888: Player 1: move must be 1 or 2!\")\n    assertEqual(play112( 51223 ), \"28888: Player 2: move must be 1 or 2!\")\n    assertEqual(play112( 51211 ), \"28888: Player 2: occupied!\")\n    assertEqual(play112( 5122221 ), \"22888: Player 1: occupied!\")\n    assertEqual(play112( 51261 ), \"28888: Player 2: offboard!\")\n    assertEqual(play112( 51122324152 ), \"12212: Tie!\")\n    print(\"Passed!\")\n\n#################################################\n# Main\n#################################################\n\ndef main():\n    testAll(\n        testSumOfSquaresOfDigits,\n        testIsHappyNumber,\n        testNthHappyNumber,\n        testNthHappyPrime,\n        testNearestKaprekarNumber,\n        testCarrylessMultiply,\n        testNthSmithNumber,\n        # bonus: (uncomment these to test them....)\n        testNthWeaklyPrime,\n        testPlay112,\n    )\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Homework/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":12574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"96800091","text":"import collections\nimport json\nimport random\n\nfrom clean_documents import clean_documents\n\ndef cutoff_list(list_, divider):\n    if divider < 1:\n        divider = 1\n    SplitList = collections.namedtuple(\n        'SplitList',\n        [\n            'listA',\n            'listB'\n        ]\n    )\n    cutoff_index = int((len(list_) - (len(list_) / divider)) - 1)\n    return SplitList(list_[:cutoff_index], list_[cutoff_index:])\n\n\ndef get_data(path):\n\n    with open(path) as events_file:\n        events = json.load(events_file)\n\n    random.shuffle(events)\n\n    data = {\n        'train': {\n            'documents': [],\n            'labels': []\n        },\n        'test': {\n            'documents': [],\n            'labels': []\n        },\n        'unlabeled': []\n    }\n\n    music, unlabeled, not_music = [], [], []\n    for event in events:\n        if 'isMusic' not in event:\n            unlabeled.append(event['description'])\n        elif event['isMusic']:\n            music.append(event['description'])\n        else:\n            not_music.append(event['description'])\n\n    music = clean_documents(music)\n    not_music = clean_documents(not_music)\n\n    splitted_music = cutoff_list(music, 4)\n    splitted_not_music = cutoff_list(not_music, 4)\n\n    data['train']['documents'] = (\n        splitted_music[0] + splitted_not_music[0]\n    )\n    data['train']['labels'] = (\n        (['music'] * len(splitted_music[0])) +\n        (['not_music'] * len(splitted_not_music[0]))\n    )\n    data['test']['documents'] = (\n        splitted_music[1] + splitted_not_music[1]\n    )\n    data['test']['labels'] = (\n        (['music'] * len(splitted_music[1])) +\n        (['not_music'] * len(splitted_not_music[1]))\n    )\n    data['unlabeled'] = unlabeled\n\n    return data\n","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"288824529","text":"\n\"\"\"\r\nConvert an image to ASCII art\r\n\"\"\"\r\nfrom PIL import Image\r\n\r\nascii_char = list(\"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\\\"^`'. \")\r\n\r\n# map the 256 gray levels onto the 70 characters\r\ndef get_char(r,g,b,alpha=256): # alpha: transparency\r\n    if alpha==0:\r\n        return \"\"\r\n    length = len(ascii_char) # total number of available characters\r\n    gray = int(0.2126*r+0.7152*g+0.0722*b) # compute the gray level from the luminance formula\r\n    # mapping\r\n    unit =(256.0+1)/length # bucket size used for indexing below\r\n    return ascii_char[int(gray/unit)] # different gray levels map to different characters\r\n    # distinguish blocks of color by their gray level\r\n\r\n\r\nif __name__ == '__main__':\r\n    WIDTH = 60\r\n    HEIGHT = 45\r\n    size=(WIDTH,HEIGHT)\r\n    #file_path = input(\"input the image you want to cut :\\n\")\r\n    file_path=\"1.jpg\"\r\n    image=Image.open(file_path)\r\n    image =image.resize(size,Image.NEAREST)# resize the image\r\n    txt=''\r\n    for i in range(HEIGHT): #\r\n        for j in range(WIDTH):\r\n            txt+=get_char(*image.getpixel((j,i))) # return the pixel at the given position (R, G, B, A)\r\n        txt+='\\n'\r\n    print(txt)\r\n    # write to file\r\n    with open('output.txt','w') as f:\r\n        f.write(txt)","sub_path":"character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"422271047","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 5 16:02:31 2020\n\n@author: Sergey\n\"\"\"\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom CNNHelper.ResNET import ResNet50\nfrom CNNHelper.VGG19 import VGG19\n\nfrom datetime import datetime\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom Core.DataHandler import DataLoader\nimport sklearn.model_selection as sk\nimport matplotlib.pyplot as plt\n\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n    try:\n        for gpu in gpus:\n            tf.config.experimental.set_memory_growth(gpu, True)\n\n    except RuntimeError as e:\n        print(e)\n# def weighted_accuracy(y_true, y_pred):\n#     w = tf.constant([0, 1 , 1])\n\n\n#     # cross_entropy = tf.reduce_mean( -tf.reduce_sum(w*y_true*tf.math.log(tf.clip_by_value(y_pred,1e-10,1.0)),axis = -1))\n#     return acc\n\n\ndef weighted_categorical_crossentropy(y_true, y_pred):\n    w = tf.constant([1/0.92, 1/0.04 , 1/0.04])\n    cross_entropy = tf.reduce_mean( -tf.reduce_sum(w*y_true*tf.math.log(tf.clip_by_value(y_pred,1e-10,1.0)),axis = -1))\n    return cross_entropy\n\nmodel =VGG19(3)\n# model =ResNet50(3)\nopt = keras.optimizers.Adam(learning_rate=0.0001)\n\n# opt = keras.optimizers.SGD(learning_rate=1,momentum=0.001)\nmodel.compile(optimizer =opt, loss=\"categorical_crossentropy\", metrics='accuracy')\n# model.compile(optimizer =opt, loss=weighted_categorical_crossentropy, metrics='accuracy')\n\nmodel.summary()\n\n\ndt = DataLoader()\nX_Data ,Y_Data = dt.LoadTrainingData(\"D:\\\\Sergey\\\\FluorocodeMain\\\\FluorocodeMain\\\\DataForTraining.npz\")\nx_v ,y_v = dt.LoadTrainingData(\"D:\\\\Sergey\\\\FluorocodeMain\\\\FluorocodeMain\\\\DataForValidation.npz\")\n\n\nhistory=model.fit(X_Data , Y_Data , batch_size =4, shuffle=True, epochs=100, validation_data = (x_v,y_v))\n\n\nplt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.ylim(0.8,1)\nplt.show()\n# summarize history for loss\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['train', 'test'], loc='upper left')\nplt.ylim(0,0.3)\nplt.show()","sub_path":"mainTrain.py","file_name":"mainTrain.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"416417371","text":"\n################ Input parameters for DFT+DMFT calculations ##################\n\nparams = {\"SC\":\t [0,\t\t\"#SC state or not\"],\n          \"dc_type\": [3, \"# dc type\"],\n          \"U\": [[4.0], \"# Coulomb repulsion (F0)\"],\n          \"J\": [[0.8], \"# Hund's coupling\"],\n          \"mu\": [3.71997927371, \"# The chemical potential (reads from dm.out if 0)\"],\n          \"atomnames\": [['V','O'], \"# The name of atoms\"],\n          \"cor_at\": [[['V1']], \"# Correlated atoms, put degenerate atoms in the same list\"],\n          \"cor_orb\": [[[['d_yz','d_xz','d_xy']]], \"# DMFT orbitals, other orbitals are treated by HF\"],\n          \"Nd_qmc\": [False, \"# DMFT Nd values are obtained from QMC sampling\"],\n          \"print_at\": [['V'], \"# The local Green functions are printed\"],\n          \"co_at\": [[0.5], \"# The coefficient of Nd for each atom\"],\n\t  \"broaden\": [0.1,\t\t \"# boardening for real axis Green function\"],\n\t  \"l\"\t: [2, \t\t \"# angular momentum\"],\n\t  \"cx\":\t\t[0.0,\t\t \"# spin-orbit coupling\"],\n\t  \"Eimp\":\t[[0],\t \"# impurity energies\"],\n\t  \"Utype\":\t['SlaterKanamori',\t\"\"],\n\t  \"kpath\":\t[[[0,0,0],[0.5,0.,0],[0.5,0.5,0],[0,0,0],[0.5,0.5,0.5]],\t\t\t\"high sym points of kpath\"],\n          }\n\n","sub_path":"DMFT_yjzhou/kpath_param.py","file_name":"kpath_param.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"301794154","text":"# import flask\r\nimport requests\r\nimport json\r\n\r\n# stock quote lookup\r\ndef getGegu(gg_code):\r\n    gegu = requests.get('http://hq.sinajs.cn/list=' + gg_code)\r\n    gegu_status = gegu.status_code\r\n    if gegu_status != 200:\r\n        return False\r\n    allinfo = gegu.text.split('\"')[1].split(',')\r\n    # print(allinfo)\r\n    gegu_name = allinfo[0]\r\n    # today's open\r\n    gegu_jinkai = allinfo[1]\r\n    # previous close\r\n    gegu_zuoshou = allinfo[2]\r\n    # current price\r\n    gegu_xianjia = allinfo[3]\r\n    # today's high\r\n    gegu_jingao = allinfo[4]\r\n    # today's low\r\n    gegu_jindi = allinfo[5]\r\n    # bid\r\n    gegu_jingmai = allinfo[6]\r\n    # ask\r\n    gegu_jingnai = allinfo[7]\r\n    # volume\r\n    gegu_chengjiaoshu = allinfo[8]\r\n    # turnover\r\n    gegu_chengjiaoe = allinfo[9]\r\n    # buy 1 volume\r\n    gegu_mai1 = allinfo[10]\r\n    # buy 1 price\r\n    gegu_mai1p = allinfo[11]\r\n    # buy 2 volume\r\n    gegu_mai2 = allinfo[12]\r\n    # buy 2 price\r\n    gegu_mai2p = allinfo[13]\r\n    # buy 3 volume\r\n    gegu_mai3 = allinfo[14]\r\n    # buy 3 price\r\n    gegu_mai3p = allinfo[15]\r\n    # buy 4 volume\r\n    gegu_mai4 = allinfo[16]\r\n    # buy 4 price\r\n    gegu_mai4p = allinfo[17]\r\n    # buy 5 volume\r\n    gegu_mai5 = allinfo[18]\r\n    # buy 5 price\r\n    gegu_mai5p = allinfo[19]\r\n    # sell 1 volume\r\n    gegu_nai1 = allinfo[20]\r\n    # sell 1 price\r\n    gegu_nai1p = allinfo[21]\r\n    # sell 2 volume\r\n    gegu_nai2 = allinfo[22]\r\n    # sell 2 price\r\n    gegu_nai2p = allinfo[23]\r\n    # sell 3 volume\r\n    gegu_nai3 = allinfo[24]\r\n    # sell 3 price\r\n    gegu_nai3p = allinfo[25]\r\n    # sell 4 volume\r\n    gegu_nai4 = allinfo[26]\r\n    # sell 4 price\r\n    gegu_nai4p = allinfo[27]\r\n    # sell 5 volume\r\n    gegu_nai5 = allinfo[28]\r\n    # sell 5 price\r\n    gegu_nai5p = allinfo[29]\r\n    # date\r\n    gegu_date = allinfo[30]\r\n    # time\r\n    gegu_time = allinfo[31]\r\n    # percent change\r\n    zuoshou = float(gegu_zuoshou)\r\n    jg = float(gegu_xianjia)\r\n    cha = jg - zuoshou\r\n    if cha > 0:\r\n        diff = cha / zuoshou\r\n    else:\r\n        diff = -(-cha / zuoshou)\r\n    gegu_diff = str(round(diff * 100, 2)) + '%'\r\n    ggu = {\"var hq_str_sh\" + gg_code + \"=\" + gegu_name,gegu_jinkai,gegu_zuoshou,gegu_xianjia,gegu_jindi,gegu_jingao,gegu_diff}\r\n    res_info = {'gegu_name': gegu_name,'gegu_jinkai': gegu_jinkai,'gegu_zuoshou': gegu_zuoshou,'gegu_xianjia': gegu_xianjia,\\\r\n                # 'gegu_jingmai':gegu_jingmai,'gegu_jingnai':gegu_jingnai,'gegu_chengjiaoshu':gegu_chengjiaoshu,\\\r\n                # 'gegu_chengjiaoe':gegu_chengjiaoe,'gegu_mai1':gegu_mai1,'gegu_mai1p':gegu_mai1p,'gegu_mai2':gegu_mai2,\\\r\n                # 'gegu_mai2p':gegu_mai2p,'gegu_mai3':gegu_mai3,'gegu_mai3p':gegu_mai3p,'gegu_mai4':gegu_mai4,'gegu_mai4p':gegu_mai4p,\\\r\n                # 'gegu_mai5':gegu_mai5,'gegu_mai5p':gegu_mai5p,'gegu_nai1':gegu_nai1,'gegu_nai1p':gegu_nai1p,'gegu_nai2':gegu_nai2,\\\r\n                # 'gegu_nai2p':gegu_nai2p,'gegu_nai3':gegu_nai3,'gegu_nai3p':gegu_nai3p,'gegu_nai4':gegu_nai4,'gegu_nai4p':gegu_nai4p,\\\r\n                # 'gegu_nai5':gegu_nai5,'gegu_nai5p':gegu_nai5p,'gegu_date':gegu_date,'gegu_time':gegu_time,\r\n                'gegu_jindi': gegu_jindi, 'gegu_jingao': gegu_jingao, 'gegu_diff': gegu_diff}\r\n    return res_info,ggu\r\n\r\n# res {'fundcode': '003095', 'name': '中欧医疗健康混合A', 'jzrq': '2020-12-17', 'dwjz': '3.2840', 'gsz': '3.2822', 'gszzl': '-0.06', 'gztime': '2020-12-18 15:00'}\r\ndef getJijin(ji_code):\r\n    jijin_gu = requests.get('http://fundgz.1234567.com.cn/js/' + ji_code + '.js?rt=1463558676006')\r\n    jijin_jin = requests.get('http://hq.sinajs.cn/list=f_'+ji_code)\r\n    status = jijin_gu.status_code\r\n    if status != 200:\r\n        return False\r\n    jijin_info = json.loads(jijin_gu.text[8:-2])\r\n    return jijin_info, jijin_jin.text","sub_path":"getInfo.py","file_name":"getInfo.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"19734786","text":"\"\"\"\ndp[i][k]: A[0:i], using k privileges, the length of the longest (contiguous) subarray that contains only 1s\n\nA[i] == 1, dp[i][k] = dp[i-1][k] + 1\nA[i] == 0, dp[i][k] = dp[i-1][k-1] + 1\n\nX X X X X 1\n\nX [i X X j] 0 X X\n\nX 1 [X X j] 0 X X\n\nX [i X X j] 0 X X\n\nTranslation:\nFind the longest subarray with at most K zeros.\n\n\n\"\"\"\n\n\nclass SolutionTony:\n    def longestOnes(self, A, K: int) -> int:\n        i = 0\n        count = 0\n        res = 0\n        for j in range(len(A)):\n            if A[j] == 1:\n                res = max(res, j - i + 1)\n            else:\n                count += 1\n                while count > K:\n                    if A[i] == 0:\n                        count -= 1\n                    i += 1\n                res = max(res, j - i + 1)\n\n        return res\n\n\n\nclass Solution:\n    def longestOnes(self, A, K):\n        res = 0\n        i = 0\n        for j in range(len(A)):\n            K -= A[j] == 0\n            if K < 0:\n                K += A[i] == 0\n                i += 1\n            res = j - i + 1\n        return res\n\n\n","sub_path":"LeetcodeNew/python2/LC_1004.py","file_name":"LC_1004.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"107755911","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom hascore import app, init_for, views\ninit_for('dev')\nwith app.test_request_context():\n\tviews.networkbar.cache_networkbar_links()\n\ntry:\n    port = int(sys.argv[1])\nexcept (IndexError, ValueError):\n    port = 8070\napp.run('0.0.0.0', port=port, debug=True)\n","sub_path":"runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
self.results[episode_index] = fin\n\n # 今ゲームの割引報酬和(各episodeの各stepごとに格納)\n self.discounted_rewards[episode_index] = self.calculate_discounted_rewards(episode_index, step_index)\n break\n # 行動価値関数(評価関数)を生成(政策評価)\n self.Q = self.calculate_state_action_value_function()\n self.output_results(policy_index)\n #print(\"Q state value function : \", self.Q)\n self.rates.append(self.calculate_win_ratio())\n\n def init_visits(self):\n return np.ones((self.n_states,self.n_actions))\n\n def init_matrix(self):\n return np.ones((self.episodes, self.steps))\n\n def init_return(self):\n return np.zeros(self.episodes)\n\n def init_results(self):\n return np.zeros(self.episodes)\n\n def init_state3(self):\n return np.zeros(self.n_actions)\n\n def generate_policy(self):\n return np.zeros(self.n_actions)\n\n def improve_policy(self, policy, state):\n if self.options[\"pmode\"]==0:\n q = self.Q[state]\n v = max(q)\n a = np.where(q==v)[0][0]\n policy[a] = 1\n elif self.options[\"pmode\"]==1:\n q = self.Q[state]\n v = max(q)\n a = np.where(v==q)[0][0]\n policy = np.ones(self.n_actions) * self.options[\"epsilon\"] / self.n_actions\n policy[a] = 1 - self.options[\"epsilon\"] + self.options[\"epsilon\"] / self.n_actions\n elif self.options[\"pmode\"] == 2:\n policy = np.exp(self.Q[state] / self.options[\"tau\"]) / sum(np.exp(self.Q[state] / self.options[\"tau\"]))\n\n return policy\n\n # play 1 game\n def action_train(self, policy, step_index, state3):\n # assumption : training player move first\n\n # まずはplayerの行動選択\n npc_action = self.select_npc_action(step_index, policy, state3)\n # 2は学習プレイヤーの行動した印\n state3[npc_action] = 2\n checked_result = self.judge(state3)\n reward = self.calculate_reward(checked_result)\n\n if reward is not None:\n return npc_action, reward, state3, checked_result\n\n # 次にenemyの行動選択\n enemy_action = self.select_enemy_action(step_index, state3)\n state3[enemy_action] = 1\n checked_result = self.judge(state3)\n reward = self.calculate_reward(checked_result)\n\n return npc_action, reward, state3, checked_result\n\n def update(self, episode_index, step_index, state, action, reward):\n # state, action, rewardを保存\n self.states[episode_index][step_index] = state\n self.actions[episode_index][step_index] = action\n self.rewards[episode_index][step_index] = reward\n\n # 出現回数の更新\n self.visits[state][action] += 1\n\n # 更新後のaction一覧を表示\n #print(\"episode : \", episode_index, \"step : \", step_index)\n #print(\"actions : \", self.actions)\n\n def is_finished(self, fin):\n return fin > 0\n\n def calculate_discounted_rewards(self, episode_index, last_index):\n discounted_rewards = np.zeros(self.steps)\n discounted_rewards[last_index] = self.rewards[episode_index][last_index]\n for step_index_from_last in range(last_index - 1, -1, -1):\n step_index_plus = step_index_from_last + 1\n discounted_rewards[step_index_from_last] = \\\n self.options[\"gamma\"] * discounted_rewards[step_index_plus]\n return discounted_rewards\n\n def calculate_state_action_value_function(self):\n # 各状態での、各行動の価値をQに格納する。\n # 価値を表現するルックアップテーブルを作成するイメージ。\n # Q : state, action\n Q = self.init_Q()\n for episode_index in range(self.episodes):\n for step_index in range(self.steps):\n this_state = self.states[episode_index][step_index]\n # ゲームが続行しているなら、0以外の値に更新されている。\n # 0ということは、そのepisodeではそのstepまでの間に\n # ゲームが終了しているということ。\n if this_state == 0:\n # ゲームが終了していれば、それ以降の割引報酬も0のため、\n # 計算打ち切り\n break\n action = self.actions[episode_index][step_index]\n Q[this_state][action] += \\\n self.discounted_rewards[episode_index][step_index]\n # 
why is Q divided by the visit count at the end?\n # because a discounted reward was added once per visit, cells of frequently\n # visited state-action pairs would otherwise simply end up with larger values.\n return Q / self.visits\n\n def calculate_win_ratio(self):\n return float(len(self.results[self.results == 2])) / float(self.episodes)\n\n # return the cell to mark\n def select_npc_action(self, step_index, policy, state3):\n a = None\n if step_index == 0:\n a = 0\n else:\n while 1:\n random = np.random.rand()\n cumulative_probability = 0\n for a in range(self.n_actions):\n cumulative_probability += policy[a]\n if cumulative_probability > random :\n break\n\n # check the cell has not been filled already;\n # non-zero means it was already played, and a played\n # cell cannot be played again, so roll the\n # action lottery once more until a free cell comes up\n if state3[a] == 0:\n break\n # on the first step, mark cell 0\n return a\n\n def judge(self, state3):\n fin_positions = [[0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8], [0,4,8], [2,4,6]]\n # the game ends when state3 contains one of the cell triples in\n # fin_positions filled entirely with 1s or entirely with 2s\n #for position in fin_positions:\n # for i in position:\n for i in range(len(fin_positions)):\n state_i = state3[fin_positions[i]]\n val_npc = sum(state_i == 2)\n val_enemy = sum(state_i == 1)\n if val_npc == 3:\n # win player\n return 2\n if val_enemy == 3:\n # win enemy\n return 1\n is_actioned_all_cell = sum(state3 == 0) == 0\n if (is_actioned_all_cell):\n # tie\n return 3\n # game is continued\n return 0\n\n def calculate_reward(self, finished_state):\n # assumption: finished_state is contained in {0, 1, 2, 3} space\n if finished_state == 2:\n return 10\n if finished_state == 1:\n return -10\n if finished_state == 3:\n return 0 # a tie is worth nothing, but must be numeric so it can be stored\n if finished_state == 0:\n return 0\n\n def select_enemy_action(self, step_index, state3):\n \"\"\"\n reach = 0\n pos = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [1, 5, 8], [0, 4, 8], [2, 4, 6]]\n a = None\n for i in range(len(pos)):\n # print(\"state3 : \", state3)\n # print(\"pos[i] : \", pos[i])\n # print(\"state3[pos[i]] : \", state3[pos[i]])\n state_i = state3[pos[i]]\n # print(\"state_i : \", state_i)\n val = sum(state_i)\n # print(\"val : \", val)\n num = len(state_i[state_i == 0])\n if val == 2 and num == 1:\n # print(\"state_i == 0 : \", state_i == 0)\n # print(\"state_i[state_i==0] : \", state_i[state_i == 0])\n idx = int(state_i[state_i == 0][0])\n # print(\"idx : \", idx)\n a = pos[i][idx]\n reach = 1\n break\n if reach == 0:\n while 1:\n a = floor(np.random.rand() * 8) + 1\n if state3[a] == 0: break\n return a\n \"\"\"\n # if the enemy is one move away from winning, go fill that cell\n fin_positions = [\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 4, 8],\n [2, 4, 6]]\n for i in range(len(fin_positions)):\n # extract only the cells belonging to this winning line\n state_i = state3[fin_positions[i]]\n #print(\"state_i : \", state_i)\n # val \\in [0, 6]\n val = sum(state_i)\n # number of cells not yet marked by either player\n num_of_no_action_cells = len(state_i[state_i == 0])\n if val == 2 and num_of_no_action_cells == 1:\n # this branch only fires for a state_i like [1, 1, 0]\n # idx: index of the still unmarked cell\n idx = int(np.where(state_i == 0)[0][0]) # numpy arrays have no .index method\n a = fin_positions[i][idx]\n # no better move exists in this case,\n # so return the chosen cell as the action\n return a\n\n # if there is no such one-move win,\n # pick a move at random\n while 1:\n a = np.random.randint(9) # cells are indexed 0..8\n if state3[a] == 0:\n # if the chosen cell is still unmarked, commit to it\n # and return it as the action\n return a\n\n def output_results(self, l):\n print('l=%d: Win=%d/%d, Draw=%d/%d, Lose=%d/%d\\n' % (l, \\\n len(self.results[self.results == 2]), self.episodes, \\\n len(self.results[self.results == 3]), self.episodes, \\\n len(self.results[self.results == 1]), self.episodes))\n\n def encode(self, state3):\n # apply transforms (2)-(8) to the state, then convert to decimal\n cands = [ 
sum(state3[self.convert[i]]*self.power) # permute the indices, then convert to decimal\n for i in range(len(self.convert))]\n # pick the smallest of the 8 candidates\n return int(min(cands))+1\n\ndef main():\n ################# arguments #################\n # L\n policy_iterations = 10000\n # M\n episodes = 100\n\n options = {\"gamma\": 0.9, \"tau\": 2, \"epsilon\": 0.05, \"pmode\": 1}\n\n #action = montecarlo_policy_iteration(policy_iterations, episodes, options)\n #print(action)\n mcpi = MonteCarloPolicyIteration(policy_iterations, episodes, options)\n mcpi.train()\n plt.plot(range(len(mcpi.rates)), mcpi.rates)\n plt.show()\n\n\nclass test_encode(unittest.TestCase):\n def test_encode_3_to_10(self):\n test_input = np.array([1, 0, 0, 0, 0, 0, 0, 0, 2])\n expected = 6563\n self.assertAlmostEqual(expected, naive_encode(test_input), delta=10e-12)\n\nif __name__ == '__main__':\n main()\n unittest.main()\n\n","sub_path":"rl/rl_sample.py","file_name":"rl_sample.py","file_ext":"py","file_size_in_byte":14898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"161651731","text":"import socket\nfrom MLP import model, np, class_names\n\ncount = 0\nhost = '192.168.0.10' # address of the interface to bind the server to\nport = 9999 # Arbitrary non-privileged port\n\nserver_sock = socket.socket(socket.AF_INET)\nserver_sock.bind((host, port))\nserver_sock.listen(1)\n\nprint(\"Waiting for a connection\")\nclient_sock, addr = server_sock.accept()\n\n\nprint('Connected by', addr)\n\n# receive the initial \"connection request from Android to the server\" once\ndata = client_sock.recv(1024)\nprint(data.decode(\"utf-8\"), len(data))\n\nwhile (True):\n # receive the value the Android side sends (\"received one : number\")\n data = client_sock.recv(1024)\n data1 = data.decode('utf-8')\n data1 = data1.replace(\"[\", \"\")\n data1 = data1.replace(\"]\", \"\")\n data1 = data1.replace(\",\", \"\")\n msp = data1.split()\n input = list(map(int, msp))\n count += 1\n print('Received', input)\n X_test = np.array([input])\n predictions = model.predict(X_test)\n result = class_names[np.argmax(predictions)]\n print(result)\n client_sock.send(result.encode())\n\n\n\n# send a marker telling the client the connection will be closed\n# i=99\n# client_sock.send(i.to_bytes(4, byteorder='little'))\n\nclient_sock.close()\nserver_sock.close()","sub_path":"13주차/Smart_Museum(서버포함)/server/Server.py","file_name":"Server.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"299716314","text":"# Uses python3\r\nimport sys\r\nimport math\r\nimport heapq\r\n\r\n\r\nclass node:\r\n def __init__(self, x, y, i):\r\n self.point = (x, y)\r\n self.dist = sys.maxsize\r\n self.num = i\r\n\r\n\r\ndef distance(a, b):\r\n x1 = a.point[0]\r\n x2 = b.point[0]\r\n y1 = a.point[1]\r\n y2 = b.point[1]\r\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\r\n\r\n\r\ndef minimum_distance(x, y):\r\n result = 0.\r\n li = []\r\n dist_li = []\r\n for i in range(len(x)):\r\n e = node(x[i], y[i], i)\r\n li.append(e)\r\n dist_li.append([sys.maxsize, i])\r\n li[0].dist = 0\r\n dist_li[0][0] = 0\r\n for i in range(1, len(x) + 1):\r\n q = min(dist_li, key=lambda x: x[0])\r\n dist_li.remove(q)\r\n result += q[0]\r\n o = li[q[1]]\r\n for j in dist_li:\r\n s = distance(o, li[j[1]])\r\n if s < li[j[1]].dist:\r\n li[j[1]].dist = s\r\n j[0] = s\r\n\r\n return result\r\n\r\n\r\nprint(minimum_distance([0, 0, 1, 1], [0, 1, 0, 1]))\r\nprint(minimum_distance([0, 0, 1, 3, 3], [0, 2, 1, 0, 2]))\r\n\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.read()\r\n data = list(map(int, input.split()))\r\n n = data[0]\r\n x 
= data[1::2]\r\n y = data[2::2]\r\n print(\"{0:.9f}\".format(minimum_distance(x, y)))\r\n","sub_path":"mysolution/connecting_points.py","file_name":"connecting_points.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"18472300","text":"import os\nimport re\nimport subprocess\nimport sys\n\n\ndef run_command(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n for line in proc.stdout.readlines():\n yield line.strip().decode('utf-8')\n\n # the return code is only meaningful once the process has finished\n return_code = proc.wait()\n if return_code:\n raise Exception(proc.stdout, return_code)\n\n\ndef run_backup_in_container(container):\n command = ['docker', 'container', 'exec', '-it', container, 'backup']\n return run_command(command)\n\n\ndef copy_backup_file(container, filename, destination_folder):\n command = ['docker', 'cp', '{}:/backups/{}'.format(container, filename), destination_folder]\n return run_command(command)\n\n\ndef copy_to_sharepoint(moved_file, share_point_folder):\n command = ['copy', moved_file, share_point_folder]\n print('Running command {}'.format(' '.join(command)))\n return run_command(command)\n\n\nif __name__ == '__main__':\n \"\"\"\n Script to backup the Postgres database.\n \n This Script:\n # Runs the backup process in the container.\n # Copies the backup file from the container to the ../backups directory.\n # Copies the backup file to OneDrive into the pmp_shield folder \n \n To use:\n # cd to the scripts folder\n # Run::\n $ python backup.py\n \"\"\"\n container = 'pmp_shield_postgres_1'\n reg_exp_for_filename = r'successfully\\screated\\sbackup\\s(pmp_shield_backup_\\d{8}_\\d{6}\\.sql\\.gz)'\n share_point_environment_variable = 'OneDrive'\n share_point_folder = os.environ.get(share_point_environment_variable)\n share_point_backup_folder = os.path.join(share_point_folder, 'pmp_shield')\n download_folder = '../backups/' # Linux\n regex = re.compile(reg_exp_for_filename)\n filename = None\n\n lines = run_backup_in_container(container)\n for line in lines:\n match = regex.match(line)\n if match:\n filename = match.group(1)\n if 'Error response from daemon' in line:\n print('Cannot run backup. 
It seems your postgres container is not up')\n sys.exit()\n #print(line)\n\n print('Filename: {}'.format(filename))\n print('--' * 30)\n\n lines = copy_backup_file(container, filename, download_folder)\n for line in lines:\n print(line)\n\n downloaded_file = os.path.join(download_folder.replace('/', '\\\\'), filename)\n if not os.path.exists(downloaded_file):\n print('Error: file was not copied')\n # print('OneDrive: {}'.format(os.environ.get('OneDrive')))\n current_file = os.path.dirname(os.path.abspath(__file__))\n print('Current file: {}'.format(current_file))\n lines = copy_to_sharepoint(downloaded_file, share_point_backup_folder)\n for line in lines:\n print(line)\n","sub_path":"scripts/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"195254815","text":"#!/usr/bin/env python\n\nlista = [32, 34, 46, 4545, 82, 79, 64, 28, 11, 512]\n\ndef Heapify(B, i, m):\n # children of node i in a 0-based heap; m is the last valid index\n left = 2 * i + 1\n right = 2 * i + 2\n maxindex = i\n if left <= m and B[left] > B[maxindex]:\n maxindex = left\n if right <= m and B[right] > B[maxindex]:\n maxindex = right\n if maxindex != i:\n B[i], B[maxindex] = B[maxindex], B[i]\n Heapify(B, maxindex, m)\n \ndef BuildHeap(n, A):\n for i in range(int(n/2-1), -1, -1):\n Heapify(A, i, n - 1)\n return A\n\nprint(BuildHeap(len(lista), lista))\n\ndef HeapSort(n, A):\n BuildHeap(n, A)\n k = n\n while k >= 1:\n A[0], A[k-1] = A[k-1], A[0]\n k = k - 1\n Heapify(A, 0, k-1)\n return A\n \nprint(HeapSort(len(lista), lista))\n","sub_path":"Algorithms_HeapSort.py","file_name":"Algorithms_HeapSort.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"358132494","text":"from django.shortcuts import render,redirect\nfrom django.shortcuts import get_object_or_404\nfrom store.models import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom store.forms import *\nfrom django.http import Http404\nimport datetime\n# Create your views here.\n\ndef index(request):\n\treturn render(request, 'store/index.html')\n\ndef bookDetailView(request, bid):\n\ttemplate_name = 'store/book_detail.html'\n\tbook=Book.objects.filter(id=bid).first()\n\tnum_available=BookCopy.objects.filter(book=book).filter(status=True).count()\n\tcontext = {\n\t\t'book': book, # set this to an instance of the required book\n\t\t'num_available': num_available, # set this to the number of copies of the book available, or 0 if the book isn't available\n\t}\n\t\n\treturn render(request, template_name, context=context)\n\n\n@csrf_exempt\ndef bookListView(request):\n\ttemplate_name = 'store/book_list.html'\n\tget_data = request.GET\n\t\n\tbooks = Book.objects.all()\n\tif 'title' in get_data.keys():\n\t\tbooks = books.filter(title__icontains = get_data['title'])\n\tif 'author' in get_data.keys():\n\t\tbooks = books.filter(author__icontains = get_data['author'])\n\tif 'genre' in get_data.keys():\n\t\tbooks = books.filter(genre__icontains = get_data['genre'])\n\n\tcontext = {\n\t\t'books': books,\n\t}\n\treturn render(request, template_name, context=context)\n\n\n\n@login_required\ndef viewLoanedBooks(request):\n\ttemplate_name = 'store/loaned_books.html'\n\tbooks=BookCopy.objects.filter(borrower=request.user)\n\t'''\n\tThe above key 'books' in the context dictionary should contain a list of instances of the \n\tBookCopy 
model. Only those book copies should be included which have been loaned by the user.\n\t'''\n\tcontext = {\n\t\t'books': books,\n\t}\n\n\treturn render(request, template_name, context=context)\n\n@csrf_exempt\n@login_required\ndef loanBookView(request):\n\t'''\n\tCheck if an instance of the asked book is available.\n\tIf yes, then set the message to 'success', otherwise 'failure'\n\t'''\n\tif request.method == 'POST':\n\t\tmessage=''\n\t\tbook_id = request.POST.get('bid') # get the book id from post data\n\t\tbook=Book.objects.filter(id=book_id).first()\n\t\tloan=BookCopy.objects.filter(book=book).filter(status=True)\n\t\tif loan.count()!=0:\n\t\t\tloan=loan.first()\n\t\t\tloan.status=False\n\t\t\tloan.borrower=request.user\n\t\t\tloan.borrow_date=datetime.date.today() \n\t\t\tloan.save()\n\t\t\tmessage='success'\n\t\telse:\n\t\t\tmessage='failure'\n\t\n\t\tresponse_data = {\n\t\t\t'message': message,\n\t\t}\n\t\treturn JsonResponse(response_data)\n\telse :\n\t\t raise Http404(\"Error :(\")\n\n'''\nFILL IN THE BELOW VIEW BY YOURSELF.\nThis view will return the issued book.\nYou need to accept the book id as argument from a post request.\nYou additionally need to complete the returnBook function in the loaned_books.html file\nto make this feature complete\n''' \n@csrf_exempt\n@login_required\ndef returnBookView(request):\n\tif request.method=='POST':\n\t\tbook_id=request.POST.get('bid')\n\n\t\tbook=BookCopy.objects.filter(id=book_id).first()\n\t\tif book.status == False:\n\t\t\tbook.borrow_date=None\n\t\t\tbook.borrower=None\n\t\t\tbook.status=True\n\t\t\tbook.save()\n\t\t\tmessage='success'\n\t\telse :\n\t\t\tmessage='failure'\n\n\t\tresponse_data={\n\t\t\t'message':message,\n\t\t}\n\t\treturn JsonResponse(response_data)\n\telse :\n\t\traise Http404(\"Error :(\")\n\n\n@csrf_exempt\n@login_required\ndef rating(request,bid):\n\n\ttemplate_name='store/rating.html'\n\tif request.method=='POST':\n\n\t\tform=RatingForms(request.POST)\n\t\ttotal_rating = 0\n\t\tif form.is_valid():\n\n\t\t\tnew_rating = float(request.POST.get('rating'))\t\n\t\t\tbook = Book.objects.filter(id=bid).first()\n\n\t\t\ttotal_user_ratings = BookRating.objects.filter(book=book)\n\t\t\ttotal_rating = 0\n\n\t\t\tfor user_ratings in total_user_ratings :\n\t\t\t\ttotal_rating += user_ratings.ratings\n\n\t\t\tuser_book = BookRating.objects.filter(book=book,user=request.user)\n\n\t\t\tif user_book.count() !=0:\n\t\t\t\tuser_book=user_book.first()\n\t\t\t\ttotal_rating += (new_rating - user_book.ratings)\t\n\t\t\t\ttotal_rating /= total_user_ratings.count()\n\t\t\t\tuser_book.ratings = new_rating\n\t\t\t\tuser_book.save() # persist the user's updated rating, not just the aggregate\n\n\t\t\telse :\n\t\t\t\tnew_user=BookRating(book=book ,\tuser=request.user , ratings=new_rating)\n\t\t\t\ttotal_rating += new_rating\n\t\t\t\ttotal_rating /= (total_user_ratings.count()+1)\n\t\t\t\tnew_user.save()\n\n\t\t\tbook.rating = total_rating\n\t\t\tbook.save()\n\t\t\tcontext ={\n\t\t\t\t'book': book, \n\t\t\t}\n\n\t\t\treturn render(request, template_name, context=context)\n\n\t\t# an invalid form would leave context undefined, so fail explicitly\n\t\traise Http404(\"Error :(\")\n\n\telse :\n\t\traise Http404(\"Error :(\")\n\t\t\t\n@csrf_exempt\n\ndef signup(request):\n\n\tform= SignupForms(request.POST)\n\t# template_name='registration/login.html'\n\tif request.method=='POST':\n\t\t\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/accounts/login')\n\n\telse:\n\t\tform = SignupForms()\n\t\n\treturn render(request, 'registration/signup.html', {'form': form})\n\n\n","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"399957961","text":"from threading import RLock, Condition\r\nclass SharedInteger():\r\n def __init__(self):\r\n self.number = 0\r\n self.lock = RLock()\r\n self.condition = Condition(self.lock)\r\n def get(self):\r\n with self.lock:\r\n return self.number\r\n def set(self, n : int):\r\n with self.lock:\r\n self.number = n\r\n self.condition.notifyAll()\r\n def inc(self, i):\r\n if isinstance(i, SharedInteger):\r\n self.lock.acquire()\r\n i.lock.acquire()\r\n self.number += i.get()\r\n self.condition.notifyAll()\r\n i.condition.notifyAll() \r\n self.lock.release()\r\n i.lock.release()\r\n else:\r\n with self.lock:\r\n self.number += i\r\n self.condition.notifyAll()\r\n def waitForAtLeast(self, soglia):\r\n with self.lock:\r\n while self.number < soglia:\r\n self.condition.wait()\r\n return self.number\r\n def setInTheFuture(self, i, soglia, valore):\r\n with self.lock:\r\n while i.get() < soglia:\r\n self.condition.wait()\r\n self.number = valore","sub_path":"Febbraio2018/SharedInteger.py","file_name":"SharedInteger.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"428590633","text":"\"\"\"\nMDP 0.2 Client implementation\n\"\"\"\n\nimport logging\n\nimport zmq\n\nfrom mdp import error as e\nfrom mdp import protocol as p\n\nDEFAULT_ZMQ_LINGER = 2000\n\n\nclass Client(object):\n \"\"\"\n MDP 0.2 Client implementation\n \"\"\"\n def __init__(self, broker_url, zmq_context=None, zmq_linger=DEFAULT_ZMQ_LINGER):\n self.broker_url = broker_url\n self._socket = None\n self._zmq_context = zmq_context \\\n if zmq_context \\\n else zmq.Context.instance()\n self._linger = zmq_linger\n self._log = logging.getLogger(__name__)\n self._expect_reply = False\n\n def connect(self, reconnect=False):\n if self.is_connected():\n if not reconnect:\n return\n self._disconnect()\n # Set up socket\n self._socket = self._zmq_context.socket(zmq.DEALER)\n self._socket.setsockopt(zmq.LINGER, self._linger)\n self._socket.connect(self.broker_url)\n self._log.debug(\"Connected to broker on ZMQ DEALER socket at %s\", self.broker_url)\n self._expect_reply = False\n\n def close(self):\n if not self.is_connected():\n return\n self._disconnect()\n\n def _disconnect(self):\n if not self.is_connected():\n return\n self._log.debug(\n \"Disconnecting from broker on ZMQ DEALER socket at %s\",\n self.broker_url\n )\n self._socket.setsockopt(zmq.LINGER, 0)\n self._socket.disconnect(self.broker_url)\n self._socket.close()\n self._socket = None\n\n def is_connected(self):\n \"\"\"\n Tell whether we are currently connected\n \"\"\"\n return self._socket is not None\n\n def send(self, service, *args):\n \"\"\"\n Send a REQUEST command to the broker to be passed to the given service.\n Each additional argument will be sent as a request body frame.\n \"\"\"\n if self._expect_reply:\n raise e.StateError(\"Still expecting reply from broker, cannot send new request\")\n self._log.debug(\"Sending REQUEST message to %s with %d frames in body\", service, len(args))\n self._socket.send_multipart((b'', p.CLIENT_HEADER, p.REQUEST, service) + args)\n self._expect_reply = True\n\n def recv_part(self, timeout=None):\n \"\"\"\n Receive a single part of the reply, partial or final\n Note that a \"part\" is actually a list in this case, as any reply part\n can contain multiple frames.\n If there are no more parts to receive, will return None\n \"\"\"\n if not self._expect_reply:\n return None\n timeout = int(timeout * 1000) if timeout else None\n 
poller = zmq.Poller()\n poller.register(self._socket, zmq.POLLIN)\n try:\n socks = dict(poller.poll(timeout=timeout))\n if socks.get(self._socket) == zmq.POLLIN:\n message = self._socket.recv_multipart()\n m_type, m_content = self._parse_message(message)\n if m_type == p.FINAL:\n self._expect_reply = False\n return m_content\n else:\n raise e.Timeout(\"Timed out waiting for reply from broker\")\n finally:\n poller.unregister(self._socket)\n\n def recv_all(self, timeout=None):\n \"\"\"\n Return a generator allowing to iterate over all reply parts\n Note that `timeout` applies to each part, not to the full list of parts\n \"\"\"\n while True:\n part = self.recv_part(timeout)\n if part is None:\n break\n yield part\n\n def recv_all_as_list(self, timeout=None):\n \"\"\"\n Return all reply parts as a single, flat list of frames\n \"\"\"\n return [frame for part in self.recv_all(timeout) for frame in part]\n\n @staticmethod\n def _parse_message(message):\n \"\"\"\n Parse and validate an incoming message\n \"\"\"\n if len(message) < 3:\n raise e.ProtocolError(\n \"Unexpected message length, expecting at least 3 frames, \"\n \"got {}\".format(len(message))\n )\n if message.pop(0) != b'':\n raise e.ProtocolError(\"Expecting first message frame to be empty\")\n if message[0] != p.CLIENT_HEADER:\n raise e.ProtocolError(\n \"Unexpected protocol header [{}], expecting [{}]\".format(\n message[0], p.CLIENT_HEADER)\n )\n if message[1] not in {p.PARTIAL, p.FINAL}:\n raise e.ProtocolError(\n \"Unexpected message type [{}], expecting either \"\n \"PARTIAL or FINAL\".format(message[1])\n )\n return message[1], message[2:]\n\n def __enter__(self):\n self.connect()\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.close()\n","sub_path":"mdp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"15487672","text":"from torch import nn\nimport torch.nn.functional as F\n\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n self.conv1 = nn.Conv3d(3, 32, (1, 3, 3), padding=(0, 1, 1))\n self.conv2 = nn.Conv3d(32, 64, (1, 3, 3), padding=(0, 1, 1))\n self.conv3 = nn.Conv3d(64, 128, (1, 3, 3), padding=(0, 1, 1))\n self.mp = nn.MaxPool3d((1, 2, 2))\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.mp(x)\n x = self.conv2(x)\n x = self.mp(x)\n x = self.conv3(x)\n x = self.mp(x)\n return x\n\nclass Decoder(nn.Module):\n def __init__(self):\n super(Decoder, self).__init__()\n self.conv1 = nn.Conv3d(128, 64, (1, 3, 3), padding=(0, 1, 1))\n self.conv2 = nn.Conv3d(64, 32, (1, 3, 3), padding=(0, 1, 1))\n self.conv3 = nn.Conv3d(32, 3, (1, 3, 3), padding=(0, 1, 1))\n\n def forward(self, x):\n x = F.interpolate(x, scale_factor=(1, 2, 2), mode='nearest')\n x = self.conv1(x)\n x = F.interpolate(x, scale_factor=(1, 2, 2), mode='nearest')\n x = self.conv2(x)\n x = F.interpolate(x, scale_factor=(1, 2, 2), mode='nearest')\n x = self.conv3(x)\n return x\n","sub_path":"attention/mycodec/3dcnn.py","file_name":"3dcnn.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"260158108","text":"from django.shortcuts import render, redirect\nfrom .models import Email\nfrom django.contrib import messages\n# Create your views here.\ndef index(request):\n context = {\n 'emails': Email.objects.all()\n }\n return render(request, \"email_val/index.html\", 
context)\n\ndef validate(req):\n email_check = Email.objects.validate(req.POST['email'])\n if email_check == False:\n messages.error(req, 'Invalid Email')\n else:\n messages.success(req, 'Valid Email!')\n\n return redirect('/')\n\ndef remove(req, id):\n Email.objects.filter(id=id).delete()\n return redirect('/')\n","sub_path":"Python/Django/emailValidationMain/apps/email_val/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"98575093","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: C:\\Users\\sboon\\AppData\\Local\\Temp\\pip-install-ptdbtr91\\quarchpy\\quarchpy\\disk_test\\iometerDiskFinder.py\n# Compiled at: 2020-03-25 05:10:07\n# Size of source mod 2**32: 6765 bytes\nimport re, time, sys\nwmi = None\nwin32api = None\nfrom sys import platform\nfrom subprocess import check_output\nfrom quarchpy.disk_test.AbsDiskFinder import AbsDiskFinder # import the class itself; a module cannot be subclassed\nfrom quarchpy.user_interface import *\n\nclass iometerDiskFinder(AbsDiskFinder):\n\n def __init__(self):\n global wmi, win32api # the imports below then bind these names at module level for the other methods\n if platform == 'win32':\n try:\n import wmi as newImport\n wmi = newImport\n except ImportError:\n raise ImportError(\"'wmi' module required, please install this\")\n\n try:\n import win32file, win32api\n except ImportError:\n raise ImportError(\"'pywin32' module required, please install this\")\n\n def returnDisk(self):\n deviceList = self.findDevices()\n myDeviceID = self.formatList(deviceList)\n return myDeviceID\n\n def findDevices(self):\n diskList = self.getAvailableDisks('OS')\n driveList = self.getAvailableDrives()\n deviceList = driveList + diskList\n return deviceList\n\n def formatList(self, deviceList):\n printText('\\n\\n ########## STEP 2 = Select a target drive. 
##########\\n')\n printText(' ------------------------------------------------------------------')\n printText(' | {:^5} | {:^10} | {:^35} |'.format('INDEX', 'VOLUME', 'DESCRIPTION'))\n printText(' ------------------------------------------------------------------')\n templ = ' | %5s | %10s | %35s |'\n for idx, i in enumerate(deviceList):\n printText(templ % (str(idx + 1), i.get('DRIVE'), i.get('NAME')))\n printText(' ------------------------------------------------------------------')\n\n try:\n drive_index = int(raw_input('\\n>>> Enter the index of the target device: ')) - 1\n except NameError:\n drive_index = int(input('\\n>>> Enter the index of the target device: ')) - 1\n\n if drive_index > -1:\n myDeviceID = deviceList[drive_index]\n else:\n myDeviceID = None\n return myDeviceID\n\n def getAvailableDisks(self, hostDrive):\n driveList = []\n diskNum = 0\n diskScan = wmi.WMI()\n for disk in diskScan.Win32_diskdrive(['Caption', 'DeviceID', 'FirmwareRevision']):\n driveInfo = {'NAME':None, \n 'DRIVE':None, \n 'FW_REV':None}\n DiskInfo = str(disk)\n DiskInfo.strip()\n a = re.search('Caption = \"(.+?)\";', DiskInfo)\n if a:\n diskName = a.group(1)\n b = re.search('DRIVE(.+?)\";', DiskInfo)\n if b:\n if b == '':\n b = '\"\"'\n diskId = b.group(1)\n diskFw = None\n c = re.search('FirmwareRevision = \"(.+?)\";', DiskInfo)\n if c:\n diskFw = c.group(1)\n if diskName != hostDrive:\n driveInfo.update(dict(zip(['NAME', 'DRIVE', 'FW_REV'], [diskName, diskId, diskFw])))\n driveList.append(driveInfo)\n\n return driveList\n\n def getAvailableDrives(self):\n RList = check_output('wmic logicaldisk get caption, Description')\n if sys.version_info.major == 3:\n RList = str(RList, 'utf-8')\n RList_Lines = RList.split('\\n')\n RList_MinusNetwork = []\n for item in RList_Lines:\n if 'Network Connection' not in item and len(item) > 0:\n RList_MinusNetwork.append(item[0:item.find(' ')])\n\n del RList_MinusNetwork[0]\n RList_MinusNetwork = self.remove_values_from_list(RList_MinusNetwork, '\\r')\n RL_DrivesAndVolumeInfo = []\n for i in RList_MinusNetwork:\n i.replace(':', '://')\n try:\n RL_DrivesAndVolumeInfo.append(win32api.GetVolumeInformation(i)[0])\n RL_DrivesAndVolumeInfo.append(i)\n time.sleep(0.1)\n except:\n continue\n\n driveList = []\n try:\n for i in xrange(0, len(RL_DrivesAndVolumeInfo), 2):\n driveInfo = {'NAME':None, \n 'DRIVE':None, \n 'FW_REV':None}\n driveInfo.update(dict(zip(['NAME', 'DRIVE', 'FW_REV'], [RL_DrivesAndVolumeInfo[i], RL_DrivesAndVolumeInfo[(i + 1)], ''])))\n driveList.append(driveInfo)\n\n except:\n for i in range(0, len(RL_DrivesAndVolumeInfo), 2):\n driveInfo = {'NAME':None, \n 'DRIVE':None, \n 'FW_REV':None}\n driveInfo.update(dict(zip(['NAME', 'DRIVE', 'FW_REV'], [RL_DrivesAndVolumeInfo[i], RL_DrivesAndVolumeInfo[(i + 1)], ''])))\n driveList.append(driveInfo)\n\n return driveList\n\n def remove_values_from_list(self, the_list, val):\n return [value for value in the_list if value != val]","sub_path":"pycfiles/quarchpy-2.0.14-py2.py3-none-any/iometerDiskFinder.cpython-37.py","file_name":"iometerDiskFinder.cpython-37.py","file_ext":"py","file_size_in_byte":5160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632955254","text":"\nPREFIX = [None, \"meth\", \"eth\", \"prop\",\n \"but\", \"pent\", \"hex\", \"hept\",\n \"oct\", \"non\", \"dec\"]\n\nNUM = [\"di\", \"tri\", \"tetra\"]\n\n\nclass BaseCompound:\n def __init__(self, name):\n self.name = name\n self.branches = []\n\n def __str__(self):\n return self.name\n\n def 
diagram(self):\n top = []\n bottom = []\n\n for branch in self.branches:\n inbottom = False\n for br in bottom:\n inbottom = (br[0] == branch[0]) or inbottom\n\n (top if inbottom else bottom).append(branch)\n\n lines = []\n longest = 0\n for branch in top:\n longest = max(branch[1], longest)\n\n for i in range(longest, 0, -1):\n line = \"\"\n branches = [item for item in top if item[1] >= i]\n\n for branch in branches:\n spaces = \" \" * ((branch[0]-1) - (len(line)//2) - 1)\n line += \" \" + spaces + \"C\"\n\n line = \" \" + line\n lines.append(line)\n lines.append(line.replace(\"C\", \"|\"))\n\n root = (\"C-\" * getattr(self, \"root\", 1))[:-1] # __init__ never sets self.root; default to a single carbon\n lines.append(root)\n\n longest = 0\n for branch in bottom:\n longest = max(branch[1], longest)\n\n for i in range(0, longest):\n line = \"\"\n branches = [item for item in bottom if item[1] > i]\n\n for branch in branches:\n spaces = \" \" * ((branch[0]-1) - (len(line)//2) - 1)\n line += \" \" + spaces + \"C\"\n\n line = \" \" + line\n\n lines.append(line.replace(\"C\", \"|\"))\n lines.append(line)\n\n return \"\\n\".join(lines)\n","sub_path":"organic/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"340178928","text":"import http.client\nimport time\nfrom datetime import datetime\nimport logging\nimport random\n\narticle_id_list=['51519003','9532263','9532289','9532345','16917667','16932049','51519003',\n '17254871','49805185','51426590','51496360']\nindex = 0\nsize = len(article_id_list)\nwhile True:\n time.sleep(random.randint(20,30))\n # print(datetime.now())\n logging.error(datetime.now())\n conn = http.client.HTTPConnection('blog.csdn.net')\n conn.request(\"GET\",\"/fengyutubu/article/details/%s\"%article_id_list[index])\n index = index + 1\n if index > size-1:\n time.sleep(60)\n index = 0\n reps = conn.getresponse()\n print(reps.status,reps.reason)\n # data = reps.read()\n time.sleep(5)\n conn.close()","sub_path":"blogscrap/http_scrap.py","file_name":"http_scrap.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"599837853","text":"#import numpy as np\nimport cv2\nimport os\n\nvideo_name='test_mall.mp4'\ndstdir='imgs_small'\nif not os.path.exists(dstdir):\n os.mkdir(dstdir) # create the output directory when it is missing, not when it exists\nINTERVAL=1\n\n\ncap = cv2.VideoCapture(video_name)\ncnt=0\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n if ret==True:\n cnt+=INTERVAL\n imname=os.path.join(dstdir, str(cnt).zfill(6)+'.jpg')\n cv2.imwrite(imname,frame)\n else:\n break # no more frames to read\n\n# Release everything if job is finished\ncap.release()\n","sub_path":"cv/take_frame.py","file_name":"take_frame.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"98976041","text":"\"\"\"Learn to classify a non-linear example using a multilayer net.\"\"\"\n\nimport random\n\nimport matplotlib.pyplot as plot\nimport numpy as np\n\nimport learning\nimport function\nimport nn_lib\n\ndef build_graph(input_width, layer_sizes, activation_function):\n \"\"\"Create deep neural classifier with cross-entropy loss.\"\"\"\n graph = function.FunctionGraph()\n inp = graph.add_variable(\"x\", (1, input_width)) # input\n for i, layer_size in enumerate(layer_sizes):\n inp = graph.add_function(\"layer_%d\" % i,\n nn_lib.FullyConnected, 1,\n input_width, layer_size,\n activation_function).set_inputs(inp)\n input_width = layer_size\n\n # Compute output.\n 
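# (a single sigmoid unit, so the CrossEntropy loss below receives a probability)\n 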
graph.add_function(\n \"output\", nn_lib.FullyConnected, 1,\n input_width, 1, nn_lib.Sigmoid).set_inputs(inp)\n\n graph.add_variable(\"target\", (1, 1)) # target\n\n # Create loss function.\n graph.add_function(\"loss\", nn_lib.CrossEntropy).set_inputs(\n \"output\", \"target\")\n\n return graph\n\ndef gen_training_data():\n \"\"\"Create training data.\"\"\"\n max_n_examples = 400\n training_data = []\n\n for _ in xrange(max_n_examples):\n x, y = (np.random.random(2) - 0.5) * 2\n if not filter_examples(x, y):\n continue\n classification = gen_classification(x, y)\n\n training_data.append({\n \"x\": np.array([[x, y]]),\n \"target\": np.array([[1.0 if classification else 0.0]])\n })\n return training_data\n\ndef gen_classification(x, y):\n \"\"\"Create boolean target tag.\"\"\"\n return x * y > 0\n\ndef filter_examples(x, y):\n \"\"\"Return true if random example should be kept.\"\"\"\n margin = 0.1\n return not (\n -margin <= x <= margin or\n -margin <= y <= margin)\n\ndef batches(total, batch_size):\n \"\"\"Yield total as batches of batch_size.\"\"\"\n i = 0\n while i < len(total):\n yield total[i : i + batch_size]\n i += batch_size\n\ndef random_batches(total, batch_size):\n \"\"\"Yield total as shuffled batches of batch_size.\"\"\"\n shuffled = list(total)\n random.shuffle(shuffled)\n return batches(shuffled, batch_size)\n\ndef main():\n \"\"\"Train deep classifier.\"\"\"\n graph = build_graph(2, [8, 4], nn_lib.ReLU)\n\n training_data = gen_training_data()\n\n trainer = learning.TrainingContext(\n graph, \"loss\",\n value_fun=lambda shape: 2 * (1 - 2 * np.random.random(size=shape)))\n\n def avg_loss():\n \"\"\"Compute average loss.\"\"\"\n sum_loss = 0.0\n for example in training_data:\n trainer.set_variables(example)\n loss = trainer.exec_context.compute(\"loss\")[0, 0]\n sum_loss += loss\n\n return sum_loss / len(training_data)\n\n batch_size = 1\n epochs = 30\n init_rate = 0.15\n decay_param = 1.5\n for i in xrange(epochs):\n rate = init_rate / (1 + i * decay_param)\n print(\"Epoch %d (avg_loss %g, alpha %g)\" % (i, avg_loss(), rate))\n for batch in random_batches(training_data, batch_size):\n learner = learning.ConstantRate(rate)\n trainer.train(batch, learner)\n\n print(\"Average loss: %g\" % (avg_loss()))\n\n print(\"Parameter values:\")\n for fun, val in trainer.parameters.iteritems():\n print(\"== %s:\\n%s\" % (fun.identifier, val))\n\n plot_data_x = np.array([example[\"x\"][0, 0] for example in training_data])\n plot_data_y = np.array([example[\"x\"][0, 1] for example in training_data])\n classification = gen_classification(plot_data_x, plot_data_y)\n color = np.where(classification, \"r\", \"b\")\n\n grid_resolution = 32\n preds = np.zeros((grid_resolution, grid_resolution))\n for grid_x in xrange(grid_resolution):\n for grid_y in xrange(grid_resolution):\n x = (grid_x + 0.5) * 2.0 / grid_resolution - 1\n y = (grid_y + 0.5) * 2.0 / grid_resolution - 1\n\n trainer.set_variables({\n \"x\": [[x, y]],\n \"target\": [[0]]})\n val = trainer.exec_context.compute(\"output\")[0, 0]\n preds[grid_x, grid_y] = val\n\n plot.imshow(preds.transpose(), extent=[-1, 1, -1, 1], interpolation=\"bicubic\")\n plot.scatter(plot_data_x, plot_data_y, c=color)\n plot.show()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"demo/multilayer.py","file_name":"multilayer.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"464640469","text":"from collections import Counter\n\ndef is_square(x):\n x = 
x**0.5\r\n return int(x) == x\r\n\r\ndef expected_counter(size):\r\n x = int(size**0.5)\r\n return Counter({3:4, 5:4*(x-2), 8:(x-2)**2})\r\n\r\ndef validate_board(graph):\r\n if graph is None:\r\n return False\r\n nodes = graph.nodes()\r\n size = len(nodes)\r\n if not is_square(size):\r\n return False\r\n c = Counter(graph.node_order(n) for n in nodes)\r\n\r\n return c == expected_counter(size)\r\n \r\n","sub_path":"square_grid_validator.py","file_name":"square_grid_validator.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"585775913","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n\n# This file defines fitness to find the coefficients of a polynomial given a set of values x, p(x)\n\n# how to use SALGA\n\n# first, define a phenotype function (optional): given a chromosome returns an individual\n# If defined, it's used only to print the best generation individual\n\ndef phenotype (chromosome):\n\tres = ''\n\tfor g in chromosome:\n\t\tres += \"%4.2f \" % (g)\n\tres += '(MAE: %4.2f)' % MAE(chromosome)\n\treturn res\n\n\n# second, define a fitness function (mandatory): given a chromosome, returns a number indicating the goodness of that chromosome\n\nimport math\n\ndef poli (chromosome,x): # evaluates a polynomial with the coefficients in chromosome at x\n\tres = 0.0\n\tl = len(chromosome)\n\tfor t in range(l):\n\t\texp = l-t-1\n\t\tres += chromosome[t]*math.pow(x,exp)\n\treturn res\n#\treturn chromosome[3]*math.pow(x,3) + chromosome[2]*math.pow(x,2) + chromosome[1]*x + chromosome[0]\n\n# target coefficients to search\ntarget = [2.0, 3.0, 5.0, 7.0, 11.0]\n\nimport tkSimpleDialog\nst = tkSimpleDialog.askstring('Target', 'Enter coefficients', initialvalue=target)\ntarget = eval(st)\n\n\n# calculates set of points to evaluate error\nx = [p * 0.1 for p in range(-100, 101)]\ny = []\nfor v in x:\n\ty.append(poli(target,v))\n\n\n# now is the fitness function\n\ndef MAE (chromosome):\n\terror = 0.0\n\tfor i in range(len(x)):\n\t\terror += math.fabs(y[i]-poli(chromosome,x[i]))\n\treturn error / len(x)\n\ndef fitness (chromosome):\n\terror = MAE(chromosome)\n\treturn 1.0 / (1.0 + error)\n\n\n# third: force parameters\n\nparameters = { 'alphabet':[0, 15], 'type':'floating', 'elitism':False, 'norm':True, 'chromsize':5, 'pmut':0.1, 'pcross':0.5, 'target':0.999 }\n","sub_path":"fitness/polinomio-pro.py","file_name":"polinomio-pro.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"434765861","text":"import re\nimport argparse\nimport multiprocessing\nfrom tabulate import tabulate\n\nfrom smartertodo.comments import get_comments_from_directory\nfrom smartertodo.GithubIntegration import GithubIntegration\nfrom smartertodo.models.Issue import Issue\n\n\ndef extract_data_from_text(text):\n # TODO: add regex(es) for: assignees and body?\n return\\\n re.search(r'TODO:(.*?)\\[', text),\\\n re.search(r'\\[(.*?)\\]', text),\\\n re.search(r'([0-9]+[m|h])', text)\n\n\ndef get_issues_from_comments(comments):\n return [\n Issue(\n str.strip(data[0].group(1)),\n data[1].group(1).replace(' ', '').split(',')\n if len(data) > 1 and data[1] else None,\n data[2].group(1)\n if len(data) > 2 and data[2] else None\n ) for data in filter(\n lambda x: len(x) > 0 and x[0] is not None, [\n extract_data_from_text(comment.value.replace('\\n', ''))\n for comment in comments\n ]\n )\n ]\n\n\ndef get_issues(directory, print_directories=False):\n return 
get_issues_from_comments(\n get_comments_from_directory(\n directory,\n print_directories\n )\n )\n\n\ndef run():\n parser = argparse.ArgumentParser(description='Create issues from todo(s).')\n parser.add_argument(\n '--path',\n dest='project_path',\n type=str,\n help='Absolute path to the folder to be scanned',\n required=True\n )\n parser.add_argument(\n '--owner',\n dest='owner',\n type=str,\n help='owner'\n )\n parser.add_argument(\n '--target',\n dest='target',\n type=str,\n help='target repo'\n )\n parser.add_argument(\n '--cpus',\n dest='CPUs',\n type=int,\n help='allowed threading capabilities',\n default=multiprocessing.cpu_count()\n )\n parser.add_argument(\n '--dry',\n type=bool,\n help='Only print issues'\n )\n parser.add_argument(\n '--print-directories',\n type=bool,\n default=False,\n help='Print directories'\n )\n\n config = parser.parse_args()\n\n issues = get_issues(config.project_path, config.print_directories)\n\n if config.dry:\n print(tabulate([issue.__dict__ for issue in issues], headers='keys'))\n else:\n def create_issue(issue):\n print('Creating: {}'.format(issue.title))\n print(vars(issue))\n\n GithubIntegration(config.owner, config.target).createIssue(\n issue.to_github_issue()\n )\n\n multiprocessing.Pool(processes=config.CPUs).map(create_issue, issues)\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"smartertodo/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"242664287","text":"import cv2\nfrom detector import Detector\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\nfrom database import Database\nimport camera\nfrom matplotlib.patches import Rectangle\nimport time\nimport numpy as np\nfrom profile import Profile\n\nmodel = Detector()\ndatabase = Database()\nfig, ax = plt.subplots()\ndatabase.load('database.pkl')\n\ndef detect_live():\n #plt.ion()\n fig, ax = plt.subplots()\n capture = cv2.VideoCapture(0)\n #show_image = ax.imshow(capture.read()[1])\n while True:\n ret, img = capture.read()\n if not ret:\n continue\n img = img[:,:,::-1]\n detect_and_draw(img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ndef detect_once():\n capture = camera.take_picture()\n unknown = detect_and_draw(capture)\n unknownCase(capture,unknown)\n\ndef draw_bounding_box(img,name,box):\n img = cv2.rectangle(img, (int(box[0]),int(box[1])), (int(box[2]), int(box[3])), (36,255,12), 1)\n cv2.putText(img, name, (int(box[0]), int(box[1])-10), cv2.FONT_HERSHEY_SIMPLEX, 0.002*img.shape[0], color=(36,255,12), thickness=1)\n return img\n\n\ndef detect_and_draw(img):\n boxes = model.detect(img)\n vectors = model.get_vectors(img,boxes)\n img = img[:,:,::-1]/255.0\n unknown = []\n for i,(box,vector) in enumerate(zip(boxes,vectors)):\n name = database.search(vector)\n print(name)\n # draw the box on the screen\n if name is None:\n unknown.append((box,vector))\n name = \"Unknown\"\n \n img = draw_bounding_box(img,name,box)\n #print(\"Bounding box size: \", box[2]-box[0], box[3]-box[1])\n # height = box[2]-box[0]\n # width = box[3]-box[1]\n\n # if height*width >= 0.15*img.size:\n #cv2.imshow('output',img)\n #print(img.shape)\n cv2.imshow('output',img)\n return unknown\n\ndef unknownCase(img,unknown):\n for box,vector in unknown:\n img2 = draw_bounding_box(img.copy(),\"who is this?\",box)\n cv2.imshow('query',img2)\n newname = input(\"Please insert new profile name (or none if not a face): \")\n if newname == 
\"none\":\n continue\n database.add(Profile(newname, vector))\n print(\"Success: new profile [ \" + newname + \" ] added\")\n\ndef detect_from_file(img_path):\n img = cv2.imread(img_path)\n print(img)\n img2 = img[:,:,::-1].copy()\n unknown = detect_and_draw(img2)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n unknownCase(img,unknown)\n\nprint(\"Choose A Detection Method:\")\nprint(\"(1) Detect Live\")\nprint(\"(2) Detect From File\")\n\ndetect_method = int(input())\nif detect_method == 1:\n detect_live()\nelif detect_method == 2:\n file_name = input('Enter the file name:')\n detect_from_file(file_name)\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"83718622","text":"class Solution(object):\n def findTheDifference(self, s, t):\n \"\"\"\n :type s: str\n :type t: str\n :rtype: str\n \"\"\"\n if not s: return t\n elif not t: return s\n c1, c2 = ord(s[0]), ord(t[0])\n for i in range(1, len(s)):\n c1 ^= ord(s[i])\n for i in range(1, len(t)):\n c2 ^= ord(t[i])\n return chr(c1 ^ c2)","sub_path":"find_the_difference.py","file_name":"find_the_difference.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"144619182","text":"#!/usr/bin/python\n# coding: utf_8\n\nimport os, shutil, pwd\nimport time\nimport modules as tm\n\n\nosf = u'Защита данных пользователя'\nname = u'Ролевое управление доступом'\nosfNum = 2\nnum = 12\nstages = 1\nparams = ['firstUserName', 'firstUserPass', 'testDir']\nprogress = '2/3'\n\n\ntestInfo = dict(osf=osf, name=name, osfNum=osfNum, num=num, stages=stages, params=params)\n\ntm.launchTest(testInfo)\n\ntest = tm.runTest(testInfo)\nfirstUser = test.params['firstUserName']\nfirstUserPass = test.params['firstUserPass']\ntestDir = test.params['testDir']\n\nfile1 = '%s/systemd_webadm.te' % testDir\nfile2 = '%s/systemd_webadm.mod' % testDir\nfile3 = '%s/systemd_webadm.pp' % testDir\nfile4 = '/etc/selinux/targeted/contexts/users/webadm_u'\nfile5 = '/etc/sudoers'\n\ntry:\n # info--------------------------------------------------------------------------------------------------------------\n test.showInfoBlock()\n\n\n # set up------------------------------------------------------------------------------------------------------------\n test.showSetUpBlock()\n\n test.createCopyFile(file5)\n\n file = open(file1, \"w\")\n file.write('module systemd_webadm 1.0;\\n')\n file.write('require {\\n')\n file.write('\ttype webadm_t;\\n')\n file.write('\ttype init_t;\\n')\n file.write('\ttype policykit_t;\\n')\n file.write('\ttype system_dbusd_t;\\n')\n file.write('\tclass capability sys_resource;\\n')\n file.write('\tclass unix_stream_socket connectto;\\n')\n file.write('\tclass dbus send_msg;\\n')\n file.write('\tclass system status;\\n')\n file.write('\tclass process setrlimit;\\n')\n file.write('}\\n')\n file.write('#============= policykit_t ==============\\n')\n file.write('allow policykit_t webadm_t:dbus send_msg;\\n')\n file.write('#============= webadm_t ==============\\n')\n file.write('allow webadm_t init_t:system status;\\n')\n file.write('#!!!! This avc is allowed in the current policy\\n')\n file.write('allow webadm_t policykit_t:dbus send_msg;\\n')\n file.write('allow webadm_t self:capability sys_resource;\\n')\n file.write('#!!!! 
This avc is allowed in the current policy\\n')\n file.write('allow webadm_t system_dbusd_t:dbus send_msg;\\n')\n file.write('#!!!! This avc is allowed in the current policy\\n')\n file.write('allow webadm_t system_dbusd_t:unix_stream_socket connectto;\\n')\n file.write('allow webadm_t self:process setrlimit;\\n')\n file.close()\n test.showActionMsg('create file %s' % file1)\n\n\n test.runCmdFromRoot(cmd=\"checkmodule -M -m %s -o %s\" % (file1, file2), code=0)\n test.runCmdFromRoot(cmd=\"semodule_package -o %s -m %s\" % (file3, file2), code=0)\n test.runCmdFromRoot(cmd=\"semodule -i %s\" % file3, code=0)\n\n\n test.installPack('httpd')\n\n\n # testing-----------------------------------------------------------------------------------------------------------\n test.showTestingBlock()\n\n test.runCmdFromRoot(cmd=\"semanage user -a -R 'staff_r system_r webadm_r' -L s0 -r s0 webadm_u\", code=0)\n\n\n res = test.runCmdFromRoot(cmd='semanage user -l | grep webadm_u', code=0)['output']\n if res == '':\n test.addResult(msg='Failed attempt to create SELinux user webadm_u', wait='', taken=res)\n\n\n test.runCmdFromRoot(cmd=\"semanage login -a -r s0 -s webadm_u %s\" % firstUser, code=0)\n\n file = open(file4, \"w\")\n file.write('system_r:local_login_t:s0 staff_r:staff_t:s0 sysadm_r:sysadm_t:s0\\n')\n file.write('system_r:remote_login_t:s0 staff_r:staff_t:s0\\n')\n file.write('system_r:sshd_t:s0 staff_r:staff_t:s0 sysadm_r:sysadm_t:s0\\n')\n file.write('system_r:crond_t:s0 staff_r:staff_t:s0\\n')\n file.write('system_r:xdm_t:s0 staff_r:staff_t:s0\\n')\n file.write('staff_r:staff_su_t:s0 staff_r:staff_t:s0\\n')\n file.write('staff_r:staff_sudo_t:s0 staff_r:staff_t:s0\\n')\n file.write('system_r:initrc_su_t:s0 staff_r:staff_t:s0\\n')\n file.write('staff_r:staff_t:s0 staff_r:staff_t:s0\\n')\n file.write('sysadm_r:sysadm_su_t:s0 sysadm_r:sysadm_t:s0\\n')\n file.write('sysadm_r:sysadm_sudo_t:s0 sysadm_r:sysadm_t:s0\\n')\n file.close()\n test.showActionMsg('create file %s' % file4)\n\n test.runCmdFromRoot(\"echo '%s ALL=(ALL) TYPE=webadm_t ROLE=webadm_r ALL' >> %s\" % (firstUser, file5))\n\n test.sshConnect(host=\"127.0.0.1\", user=firstUser, passwd=firstUserPass)\n\n res = test.sshRunCmd(cmd='id -Z', code=0)['output']\n if res != 'webadm_u:staff_r:staff_t:s0':\n test.addResult(msg='Security context mismatch', wait='webadm_u:staff_r:staff_t:s0', taken=res)\n\n test.sshRunCmd(cmd='echo \"# test comm\" >> /etc/httpd/conf/httpd.conf', code=1)\n test.sshRunCmd(cmd='echo \"Port 20000\" >> /etc/ssh/sshd_config', code=1)\n test.sshRunCmd(cmd='systemctl restart httpd', code=1)\n test.sshRunCmd(cmd='systemctl restart sshd', code=1)\n\n test.sshDisconnect()\n\n # test.runCmdFromUser(cmd=\"sudo -s id -Z\", code=0)\n # TODO: finish this check\n\n\n\n\n\nexcept Exception as e:\n test.showError(e.message)\n\n\nfinally:\n try:\n # clear---------------------------------------------------------------------------------------------------------\n test.showEndBlock()\n\n test.exchangeCopyFile(file5)\n\n test.runCmdFromRoot(cmd='rm %s' % file4, code=0, remov=True)\n\n test.uninstallPack('httpd')\n\n test.runCmdFromRoot(cmd='semanage login -d %s' % firstUser, code=0, remov=True)\n test.runCmdFromRoot(cmd='semanage user -d webadm_u', code=0, remov=True)\n test.runCmdFromRoot(cmd='semanage module -r systemd_webadm', code=0, remov=True)\n\n except Exception as e:\n test.showError(e.message)\n\n finally:\n # result out----------------------------------------------------------------------------------------------------\n 
test.showResultBlock()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tests/test12.py","file_name":"test12.py","file_ext":"py","file_size_in_byte":5858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"301555322","text":"\nimport json\nfrom pathlib import Path\nfrom urllib.parse import urlencode\n\n\"\"\"Keywords used in configuration\"\"\"\nCONFIG_KEYWORDS = [\n '@scheme',\n '@host',\n '@tld',\n '@path',\n '@query',\n '@fragment',\n '@subdomain',\n '@host_postfix'\n]\n\nALLOWED_URL_SCHEMES = [\n 'http',\n 'https'\n]\n\n\nclass UrlGenerator(object):\n def __init__(self, config_file_path, **kwargs):\n \"\"\"\n Args:\n config_file_path (str): Path to configuration file\n **kwargs: Global parameters\n\n Raises:\n UrlGeneratorException if the configuration file is not found.\n \"\"\"\n self.params = kwargs\n\n config_file = Path(config_file_path)\n if not config_file.is_file():\n raise UrlGeneratorException('Configuration file not found \"{}\"'.format(config_file_path))\n\n with open(config_file, 'r', encoding='utf8') as f:\n self.config = json.loads(f.read())\n\n def get_url(self, _path, **kwargs):\n \"\"\"Return compiled URL from the given dotted path string using configuration.\n\n See Readme.md for more information\n\n Args:\n _path (str): like 'heureka.category.index'\n **kwargs: Additional parameters like `category='auto-moto', page_index=10, lang='sk'`\n\n Returns:\n str: Compiled URL like 'https://auto-moto.heureka.sk?page=10'\n \"\"\"\n _path = _path.split('.')\n\n params = self.params.copy()\n params.update(kwargs)\n\n config = self._get_url_parts(_path, params, self.config)\n\n query_string = self._compile_query_string(config['@query'], params) if '@query' in config else ''\n\n url_template = self.url_join(config, query_string)\n\n return self.compile_url_template(url_template, params)\n\n def compile_url_template(self, url, params):\n \"\"\"Compile and sanitize URL template\n\n Args:\n url (str): URL Template like 'http://{category_name}.heureka.{lang}/'\n params (dict): like `lang='cz', ...`\n\n Returns:\n str: Compiled template like 'http://auto-moto.heureka.cz/'\n\n Raises:\n UrlGeneratorException if any mandatory parameter is missing\n \"\"\"\n try:\n return url.format(**params)\n except KeyError:\n raise UrlGeneratorException('Missing mandatory parameter')\n\n def url_join(self, url_parts, query_string):\n \"\"\"Joins URL defined by configuration\n\n Args:\n url_parts (dict): like {'@scheme': 'https', '@host': 'www.heureka.{tld}', '@path': 'search' ...}\n query_string (str): like 'q=automobily&offset=2&limit=10'\n\n Returns:\n str: Compiled URL like 'https://www.heureka.cz/search/?q=automobily&offset=2&limit=1'\n \"\"\"\n if '@scheme' not in url_parts:\n raise UrlGeneratorException('Missing required property @scheme')\n\n scheme = url_parts['@scheme']\n if scheme not in ALLOWED_URL_SCHEMES:\n raise UrlGeneratorException('Unsupported URL scheme: \"{}\"'.format(scheme))\n\n if '@host' not in url_parts:\n raise UrlGeneratorException('Missing required property @host')\n\n host = url_parts['@host'].rstrip('/')\n subdomain = url_parts.get('@subdomain')\n host_postfix = url_parts.get('@host_postfix')\n\n url = \"{}://{}{}{}\".format(\n scheme,\n f'{subdomain}.' 
if subdomain else '',\n host,\n f'.{host_postfix}' if host_postfix else ''\n )\n\n if '@path' in url_parts:\n url += '/{}'.format(url_parts['@path'].lstrip('/'))\n\n if query_string != '':\n url += '?{}'.format(query_string)\n\n if '@fragment' in url_parts:\n url += '#{}'.format(url_parts['@fragment'].lstrip('#'))\n\n return url\n\n def _compile_query_string(self, query_config, params):\n \"\"\"Compiles query string from parameters using queryConfig\n\n Args:\n query_config (dict): configuration like `offset='o', limit='l'`\n params (dict): like `offset=2, limit=10`\n\n Returns:\n str: Compiled query like 'o=2&l=10'\n \"\"\"\n query_params = {}\n for param_name, param_key in query_config.items():\n if param_name in params:\n query_params[param_key] = params[param_name]\n\n return urlencode(query_params)\n\n def _get_url_parts(self, path, params, config):\n \"\"\"Gets url parts configuration recursively for given path\n\n Args:\n path (list): Like ['heureka', 'category', 'index'] means 'heureka.category.index'\n params (dict): Like `category='auto-moto', page_index=10, lang='sk'`\n config (dict): Main configuration array\n\n Returns:\n dict: Containing urlParts like {'@scheme': 'https', '@host': 'www.heureka.{tld}', '@path': 'search' ...}\n \"\"\"\n url_parts = {}\n\n for key, value in config.items():\n if key in CONFIG_KEYWORDS:\n url_parts[key] = value\n\n if path and key == path[0]:\n _path = path.copy()\n _path.pop(0)\n url_parts.update(self._get_url_parts(_path, params, config[key]))\n\n if self._evaluate_template_condition(key, params):\n url_parts.update(self._get_url_parts(path, params, config[key]))\n\n return url_parts\n\n def _evaluate_template_condition(self, condition, params):\n \"\"\"Evaluates condition like \"{variable}=value\" if variable equals value.\n\n Args:\n condition (str):\n params:\n\n Returns:\n bool\n \"\"\"\n if '{' not in condition or not params:\n return False\n try:\n parts = condition.split('=')\n\n return parts[0].format(**params) == parts[1].format(**params)\n except KeyError:\n return False\n\n\nclass UrlGeneratorException(Exception):\n pass\n","sub_path":"url_generator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"254690762","text":"HOST = 'localhost'\nPORT = 18181\n\n# Creds\nCOMPANY_NAME = 'Chat2Desk'\nTOKEN = 'TOKEN123456789'\n\nSENDER_AUTH = 'sender@auth'\nCAPTURE_SERVER_IP = '127.0.0.1'\nCAPTURE_SERVER_FQDN = \"pushapi-python.infowatch.ru\"\nSENDER_DNS = \"localhost.computer.domain\"\nCONTEXT_SENDER_IDENTITY_ID = 1\n\nRECEIVER_RES_ID = 2\n\nCHUNK_SIZE = 64 # kb\n","sub_path":"REPORTS-179_Infowatch/user_constsants.py","file_name":"user_constsants.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"395059856","text":"from bs4 import BeautifulSoup\nfrom csv import DictWriter\nfrom config import writePath\nimport urllib2\n\n\ndef getMNSenate(partyDict):\n soup = BeautifulSoup(urllib2.urlopen('http://www.senate.leg.state.mn.us/members/index.php?ls=#dist').read())\n table = soup.find('div', {'id': 'hide_show_alpha_all'})\n links = table.find_all('a')\n dictList = []\n for link in links:\n repInfo = {}\n if link.find('b') is not None:\n identity = link.find('b').string\n repInfo['District'] = 'MN State Senate District ' + 
str(int(identity.split(\"(\")[1].split(\",\")[0].strip()))\n repInfo['Name'] = identity.split(\"(\")[0].strip().replace(\" \", \" \").replace(\" \", \" \")\n repInfo['Party'] = partyDict[identity.split(',')[len(identity.split(',')) - 1].strip().replace(')', '')]\n repInfo['Website'] = 'http://www.senate.leg.state.mn.us' + link.get('href')\n dictList.append(repInfo)\n return dictList\n\n\nif __name__ == '__main__':\n partyDict = {'R': 'Republican', 'D': 'Democratic', '': 'Unknown', 'I': 'Independent', 'Democrat': 'Democratic', 'Republican': 'Republican', 'Democratic': 'Democratic', 'Independent': 'Independent', 'DFL': 'Democratic-Farmer Labor'}\n dictList = getMNSenate(partyDict)\n with open(writePath + 'MNSenate.csv', 'w') as csvFile:\n dwObject = DictWriter(csvFile, ['District', 'Name', 'Website', 'Party'])\n dwObject.writeheader()\n for row in dictList:\n dwObject.writerow(row)\n","sub_path":"attended/SL/MNSenate.py","file_name":"MNSenate.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"526304778","text":"# coding=utf-8\nimport datetime\nfrom . import admin_user\nfrom flask import render_template, request, session, json, url_for, redirect, jsonify\nfrom app.constants import *\nfrom sqlalchemy import and_\nfrom app.utils.response_code import RET\n\n\n# Login interceptor\n@admin_user.before_request\ndef check():\n if 'admin_name' not in session and \"admin/user/login\" not in request.url:\n return redirect(\"/admin/user/login\")\n else:\n pass\n\n\n# Login\n@admin_user.route('/login', methods=['GET', 'POST'])\ndef login():\n from app.models import User\n import app.models\n # POST request\n if request.method == \"POST\":\n username = request.form.get('username')\n password = request.form.get('password')\n user = User.query.filter(and_(User.is_admin == 1, User.nick_name == username)).first()\n if user is not None and user.check_password(password):\n session['admin_name'] = username\n return render_template('admin/index.html', user=user)\n else:\n msg = \"Incorrect username or password\"\n return render_template('admin/login.html', user=user, msg=msg)\n # GET request\n if request.method == \"GET\":\n if 'admin_name' in session:\n session.pop('admin_name')\n return render_template('admin/login.html')\n\n\n# User statistics page\n@admin_user.route('/count')\ndef count():\n from app.models import User\n # total number of users\n total = User.query.count()\n # new users this month\n total_month = 0\n # new users today\n total_day = 0\n user_list = User.query.all()\n now = datetime.datetime.now()\n for i in user_list:\n if (now - i.create_time).days < 30:\n total_month += 1\n if (now - i.create_time).days < 1:\n total_day += 1\n i = 1\n times = []\n nums = []\n # collect the dates of the last 12 days as strings\n while (i <= 12):\n t = now + datetime.timedelta(days=-i + 1)\n times.append(str(t.strftime('%Y-%m-%d')))\n i += 1\n times = times[::-1]\n i = 0\n user_list = User.query.all()\n # count the active users for each of the last 12 days\n while (i < 12):\n count = 0\n for j in user_list:\n if (now - j.last_login).days > i - 1 and (now - j.last_login).days < i + 1:\n count += 1\n nums.append(count)\n i += 1\n nums = nums[::-1]\n return render_template('admin/user_count.html', total=total, total_month=total_month, total_day=total_day,\n times=times, nums=nums)\n\n\n# User list page\n@admin_user.route('/list')\ndef list():\n from app.models import User\n cur_page = 1\n page = User.query.order_by(User.create_time.desc()).paginate(cur_page, ADMIN_USER_PAGE_MAX_COUNT)\n user_list = page.items\n total_page = page.pages\n return render_template('admin/user_list.html', user_list=user_list, cur_page=cur_page, total_page=total_page)\n\n\n# Pagination request\n@admin_user.route('/getList')\ndef getList():\n from app.models import User\n # which page is requested\n cur_page = int(request.args.get('p'))\n page = User.query.order_by(User.create_time.desc()).paginate(cur_page, ADMIN_USER_PAGE_MAX_COUNT)\n user_list = page.items\n total_page = page.pages\n return render_template('admin/user_list.html', user_list=user_list, cur_page=cur_page, total_page=total_page)\n","sub_path":"app/admin/admin_user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"278972892","text":"import serial\nfrom lib import navtelecom\n\nsmart = serial.Serial('COM3', 9600, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_ONE)\nsmart.timeout = 10\n\ndev = navtelecom.Navtelecom(smart)\ndata = dev.send('@NTC', '*?A', 1, 0)\n\nresponse = dev.read()\nif(response):\n print(response)\n\nsmart.close()\n","sub_path":"navtelecom.py","file_name":"navtelecom.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"567931586","text":"from route4me import Route4Me\n\nKEY = \"11111111111111111111111111111111\"\n\n\ndef main():\n route4me = Route4Me(KEY)\n route = route4me.route\n response = route.get_routes(limit=10, Offset=5)\n if hasattr(response, 'errors'):\n print('. '.join(response.errors))\n else:\n response = route.get_activities(route_id=response[0].route_id,\n limit=10,\n Offset=5)\n if hasattr(response, 'errors'):\n print('. '.join(response.errors))\n else:\n for i, activity in enumerate(response.results):\n print('Activity #{}'.format(i + 1))\n print('\\tActivity ID: {}'.format(activity.activity_id))\n print('\\tActivity Message: {}'.format(\n activity.activity_message\n ))\n print('\\tActivity Type: {}'.format(activity.activity_type))\n print('\\tRoute ID: {}'.format(activity.route_id))\n print('\\tRoute Name: {}'.format(activity.route_name))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/activities/get_activities.py","file_name":"get_activities.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"518838141","text":"#!/usr/bin/env python3.6\n\nimport csv\nimport sqlite3\nimport sys\nimport datetime\n\npair=sys.argv[1]\nds=sys.argv[2]\nde=sys.argv[3]\n\npcnv={}\npcnv['usdjpy'] = 'USD/JPY'\npcnv['eurjpy'] = 'EUR/JPY'\npcnv['gbpjpy'] = 'GBP/JPY'\npcnv['audjpy'] = 'AUD/JPY'\npcnv['nzdjpy'] = 'NZD/JPY'\npcnv['cadjpy'] = 'CAD/JPY'\npcnv['chfjpy'] = 'CHF/JPY'\npcnv['zarjpy'] = 'ZAR/JPY'\n\ndef date_cnv(dtstr):\n return dtstr.replace('/','').replace(' ','').replace(':','')\n\ndef strtodt(s):\n d=datetime.datetime(\\\n int(s[0:4]),\\\n int(s[4:6]),\\\n int(s[6:8]),\\\n int(s[8:10]),\\\n int(s[10:12]),\\\n int(s[12:14]))\n return d\n\nconn = sqlite3.connect(\"../db/chart_nano.db\")\n\nsql = \"select dt, b_st, b_hi, b_lo, b_en, a_st, a_hi, a_lo, a_en from chart where pair='{pair}' and ashi='1分足' and dt >= '{ds}' and dt <= '{de}' order by dt\".format(pair=pcnv[pair], ds=ds, de=de)\nprint(sql)\n\nsqltmplate=\"insert into tick(dt, bid, ask)values('{date}',{bid},{ask});\"\nfor r in conn.execute(sql):\n dtstr=date_cnv(r[0])\n dt=strtodt(dtstr)\n b_st=r[1]\n b_hi=r[2]\n b_lo=r[3]\n b_en=r[4]\n a_st=r[5]\n a_hi=r[6]\n a_lo=r[7]\n a_en=r[8]\n outs=sqltmplate.format(date=dt.strftime('%Y%m%d%H%M%S'), 
bid=b_st, ask=a_st)\n print(outs)\n\n dt=dt + datetime.timedelta(seconds=1)\n outs=sqltmplate.format(date=dt.strftime('%Y%m%d%H%M%S'), bid=b_lo, ask=a_lo)\n print(outs)\n\n dt=dt + datetime.timedelta(seconds=1)\n outs=sqltmplate.format(date=dt.strftime('%Y%m%d%H%M%S'), bid=b_hi, ask=a_hi)\n print(outs)\n\n outs=sqltmplate.format(date=dt.strftime('%Y%m%d%H%M') + '59', bid=b_en, ask=a_en)\n print(outs)\n\nconn.close()\n","sub_path":"tmppg/create_inssql.py","file_name":"create_inssql.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"582050749","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# NCTR, Nile Center for Technology Research\n# Copyright (C) 2011-2012 NCTR ().\n#\n##############################################################################\n\nfrom osv import fields, osv\n\n#----------------------------------------\n# Class building maintenance wizard\n#----------------------------------------\nclass building_maintenance_wizard(osv.osv_memory):\n\n _name = \"building.maintenance.wizard\"\n _description = \"Building maintenance wizard\"\n\n STATE_SELECTION = [\n ('completed', 'Completed orders'),\n ('incomplete', 'Incomplete orders'), ]\n\n _columns = {\n 'date_from': fields.date('From', required=True,), \n 'date_to': fields.date('To', required=True),\n 'wizard_type': fields.selection([('by_building','By building'),('by_partner','By partner')],'Wizard type'),\n 'maintenance_type': fields.many2one('building.maintenance.type', 'Maintenance type'),\n 'state': fields.selection(STATE_SELECTION,'State',), \n 'building_id': fields.many2one('building.manager', 'Building',),\n 'partner_id':fields.many2one('res.partner', 'Partner'),\n }\n\n def print_report(self, cr, uid, ids, context=None):\n data = self.read(cr, uid, ids, [], context=context)[0]\n datas = {\n 'ids': [],\n 'model': 'building.maintenance',\n 'form': data,\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'building_maintenance.report',\n 'datas': datas,\n }\nbuilding_maintenance_wizard()\n \n","sub_path":"v_7/GDS/shamil_v3/building_management_6.1/wizard/building_maintenance_wizard.py","file_name":"building_maintenance_wizard.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50315873","text":"import misc\n\ndef check(board, game):\n #check only double ships in every row\n for row in board:\n row = \"\".join(str(x) for x in row)\n if row.count(str(misc.SHIP)*3) > 0:\n misc.fail(\"3 long ships (or two ships touching horizontally) \"+str(board))\n if len(row) != game.board_size:\n misc.fail(\"Incorrect row length\")\n #check no touching in any column\n for x in range(game.board_size):\n column = \"\".join(str(row[x]) for row in board)\n if column.count(str(misc.SHIP)*2) > 0:\n misc.fail(\"Ships touching vertically\")\n #check number of ships correct\n board_string = \"\".join(\"\".join(str(x) for x in row) for row in board)\n if board_string.count(str(misc.SHIP)*2) != game.num_ships:\n misc.fail(\"Incorrect number of ships\")\n return False\n return True\n","sub_path":"misc/board_checker.py","file_name":"board_checker.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"126491490","text":"# coding: utf-8\nimport os\nimport pandas as pd\nimport random\nfrom 
czsc.utils import echarts_plot as plot\nfrom czsc.analyze import CZSC, RawBar\nfrom czsc.enum import Freq\n\ncur_path = os.path.split(os.path.realpath(__file__))[0]\n\n\ndef test_heat_map():\n data = [{\"x\": \"{}hour\".format(i), \"y\": \"{}day\".format(j), \"heat\": random.randint(0, 50)}\n for i in range(24) for j in range(7)]\n x_label = [\"{}hour\".format(i) for i in range(24)]\n y_label = [\"{}day\".format(i) for i in range(7)]\n hm = plot.heat_map(data, x_label=x_label, y_label=y_label)\n file_html = 'render.html'\n hm.render(file_html)\n os.remove(file_html)\n\n\ndef test_kline_pro():\n file_kline = os.path.join(cur_path, \"data/000001.SH_D.csv\")\n kline = pd.read_csv(file_kline, encoding=\"utf-8\")\n bars = [RawBar(symbol=row['symbol'], id=i, freq=Freq.D, open=row['open'], dt=row['dt'],\n close=row['close'], high=row['high'], low=row['low'], vol=row['vol'])\n for i, row in kline.iterrows()]\n ka = CZSC(bars)\n\n # bs = []\n # for x in ka.bi_list:\n # if x.fx_b.mark == Mark.D:\n # mark = \"buy\"\n # else:\n # mark = \"sell\"\n # bs.append({\"dt\": x.f, \"mark\": mark, mark: x.fx_b.fx})\n #\n # chart = plot.kline_pro(ka.bars_ubi, fx=ka.fx_list, bi=ka.bi_list, xd=ka.xd_list, bs=bs)\n # chart.render()\n\n","sub_path":"test/test_plot.py","file_name":"test_plot.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"146329145","text":"\"\"\"\nTemplate Tag for Handling the Login links/dropdowns in templates\n\"\"\"\nfrom base64 import b64encode, b32encode\nfrom io import BytesIO\n\nfrom django import template\n\nfrom hub_app.authlib.totp.qr import create_png_qr_code\n\nregister = template.Library() # pylint: disable=invalid-name\n\n\n@register.inclusion_tag('hub_app/inclusion/inline-qr-code.html', name='inline_otp_qr_code', takes_context=False)\ndef do_inline_otp_qr_code(username: str, secret: bytes) -> dict:\n \"\"\"\n Create an OTP QR Code Inline Image\n \"\"\"\n with BytesIO() as buffer:\n secret_as_base32 = b32encode(secret)\n create_png_qr_code(username, secret_as_base32, block_size=8).save(buffer)\n buffer_contents = buffer.getvalue()\n buffer_contents_as_base64 = b64encode(buffer_contents)\n buffer_contents_as_string = buffer_contents_as_base64.decode('us-ascii')\n img_src = 'data:image/png;base64,{}'.format(buffer_contents_as_string)\n return {\n 'qr_img_data': img_src\n }\n","sub_path":"hub_app/templatetags/hub_app_otp.py","file_name":"hub_app_otp.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"100086583","text":"import os\nimport argparse\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\", \"--name\", help=\"image folder name\", type=str, default= 'rose', required=False)\n args = parser.parse_args()\n ingredient_type = args.name\n if 'multi' in ingredient_type:\n path = \"images/\" + ingredient_type\n filelist = os.listdir(path)\n for file in filelist:\n try:\n rename_multi_in(ingredient_type,file)\n except OSError:\n print(\"No such folder\")\n else:\n for ingredient in [ingredient_type]:\n rename_sig_in(ingredient)\n\ndef rename_multi_in(ingredient_type, subtype):\n path= \"images/\" + ingredient_type +'/' + subtype\n name= subtype\n startNumber= '1'\n fileType= '.jpg'\n print(\"rename files as: \"+name+startNumber+fileType)\n count=0\n filelist=os.listdir(path)\n for files in filelist:\n if 'jpg' in files:\n fileType = '.jpg'\n 
elif 'png' in files:\n fileType = '.png'\n Olddir=os.path.join(path,files)\n if os.path.isdir(Olddir):\n continue\n Newdir=os.path.join(path,name+str(count+int(startNumber))+fileType)\n os.rename(Olddir,Newdir)\n count+=1\n print(\"Totally rename \"+str(count)+\" files\")\n\ndef rename_sig_in(ingredient_type):\n path = \"images/\" + ingredient_type\n name = ingredient_type\n startNumber = '1'\n fileType = '.jpg'\n print(\"rename files as: \"+name+startNumber+fileType)\n count=0\n filelist=os.listdir(path)\n for files in filelist:\n Olddir=os.path.join(path,files)\n if os.path.isdir(Olddir):\n continue\n Newdir=os.path.join(path,name+str(count+int(startNumber))+fileType)\n os.rename(Olddir,Newdir)\n count+=1\n print(\"Totally rename \"+str(count)+\" files\")\n\nif __name__ == '__main__':\n main()\n","sub_path":"crawler_images/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"246364157","text":"#Day 13\r\n#Task 1\r\nimport re\r\ndef is_allowed_specific_char(string):\r\n charRe = re.compile(r'[^a-zA-Z0-9.]')\r\n string = charRe.search(string)\r\n return not bool(string)\r\n\r\nprint(is_allowed_specific_char(\"ABCDEFabcdef123450\")) \r\nprint(is_allowed_specific_char(\"*&%@#!}{\"))\r\n\r\n#Task 2\r\ndef text_match(text):\r\n patterns = '\\w*z.\\w*'\r\n if re.search(patterns, text):\r\n return 'Found a match!'\r\n else:\r\n return('Not matched!')\r\n\r\nprint(text_match(\"The quick brown fox jumps over the lazy dog.\"))\r\nprint(text_match(\"Python Exercises.\"))\r\n\r\n#Task 3\r\ndef end_num(string):\r\n text = re.compile(r\".*[0-9]$\")\r\n if text.match(string):\r\n return True\r\n else:\r\n return False\r\n\r\nprint(end_num('abcdef'))\r\nprint(end_num('abcdef6'))\r\n\r\n#Task 4\r\nresults = re.finditer(r\"([0-9]{1,3})\", \"Exercises number 1, 12, 13, and 345 are important\")\r\nprint(\"Number of length 1 to 3\")\r\nfor n in results:\r\n print(n.group(0))\r\n\r\n#Task 5\r\ndef text_match(text):\r\n patterns = '^[a-zA-Z0-9_]*$'\r\n if re.search(patterns, text):\r\n return 'Found a match!'\r\n else:\r\n return('Not matched!')\r\n\r\nprint(text_match(\"The quick brown fox jumps over the lazy dog.\"))\r\nprint(text_match(\"Python_Exercises_1\"))\r\n","sub_path":"day13.py","file_name":"day13.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"254204846","text":"import urllib\nimport os\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nfrom settings import APK_FOLDER\n\ndef get_download_links():\n link_list = []\n next_available = True\n page = 1\n while next_available:\n try:\n url = \"https://cubapk.com/store/?page={}\".format(page)\n text = urlopen(url).read()\n soup = BeautifulSoup(text, features=\"lxml\")\n page += 1\n except urllib.error.HTTPError:\n break\n\n data = soup.findAll('div',attrs={'class':'app-meta'})\n for div in data:\n links = div.findAll('a')\n for a in links:\n link_list.append(\"https://cubapk.com\" + a['href'])\n return link_list\n\ndef download_apk(link, force_download=False):\n try:\n name = link.split('/')[-3]+'.apk' \n if name in os.listdir(APK_FOLDER) and not force_download:\n return True, 'App in cache'\n apath = os.path.join(APK_FOLDER, name)\n urllib.request.urlretrieve(link, apath)\n return True, None\n except urllib.error.HTTPError:\n return False, 'HTTP 404'\n \n\nif __name__ == '__main__':\n link_list = 
get_download_links()\n print('Found {}\\n\\n'.format(len(link_list)))\n for link in link_list:\n print(\"Downloading {}\".format(link))\n status, msg = download_apk(link)\n print('[{}] {}\\n\\n'.format('OK' if status else 'ERROR', msg))\n print('Download Complete')\n\n","sub_path":"download_apk.py","file_name":"download_apk.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"537019585","text":"# coding=utf8\n\n\nimport os\n\npid = os.fork()\nif pid < 0:\n print('error in fork')\nelif pid == 0:\n print ('I am child process (%s) and my parent is %s.'%(os.getpid(),os.getppid()))\nelse:\n print ('I am (%s) just create a child process %s' % (os.getpid(), pid))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"code/untitled1/9-1-进程和线程和协程/1-多进程/linux-unix/创建子进程.py","file_name":"创建子进程.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"645259369","text":"# https://pythontips.com/2013/07/30/20-python-libraries-you-cant-live-without/\n# MyClass(self, required=True, someNumber=, *args, **kwargs)\n# http://www.informit.com/articles/article.aspx?p=2314818\n# https://docs.djangoproject.com/en/1.10/intro/tutorial04/\n# https://devcenter.heroku.com/articles/deploying-python\n# https://www.tutorialspoint.com/sqlite/sqlite_python.htm\n# https://maryrosecook.com/blog/post/a-practical-introduction-to-functional-programming\n# scheme\n# https://www.cs.kent.ac.uk/people/staff/dat/miranda/whyfp90.pdf\n# pyqt\n# http://www.learncpp.com/cpp-tutorial/71-function-parameters-and-arguments/\n# http://www.pyimagesearch.com/2017/04/24/eye-blink-detection-opencv-python-dlib/\n# https://github.com/PyMySQL/PyMySQL\n# https://medium.com/@RobSm/deep-learning-prerequisites-logistic-regression-in-python-bcdb4c561358\n# http://www.holehouse.org/mlclass/\n# http://www.kdnuggets.com/2015/11/seven-steps-machine-learning-python.html\n# http://cglab.ca/~abeinges/blah/too-many-lists/book/first-final.html\n\n# navigate to z-drive: cd\\ then Z:\n# django innit Z:\\Inventory>C:\\Users\\neilp\\AppData\\Local\\Programs\\Python\\Python36-32\\Scripts\\dj\n# django-admin.exe startproject mysite\n# network access host through address 10.5.112.99:8000\n\nimport sqlite3 as sq\nimport time\nfrom sqlite3 import OperationalError\nimport re\n\n\n# Dates in DD-MM-YYYY\n# Create sample case inventory + rewrite maps\n\n\ndef createDB(dbfile):\n try:\n connection = sq.connect(dbfile)\n print(\"Opened database successfully\")\n return connection\n\n except OperationalError:\n print(\"Unable to initialize.\")\n return False\n\n\ndef createTableList(connection):\n if not exists_table(connection, 'TableList'):\n connection.execute(''' CREATE TABLE TableList\n (TableListID INTEGER PRIMARY KEY AUTOINCREMENT,\n Name TEXT NOT NULL,\n DateModified TEXT); ''')\n connection.commit()\n print('Table created successfully')\n return 'TableList table created successfully'\n\n else:\n print('Table already exists')\n return 'TableList table already exists'\n\n\ndef createConnectorsTable(connection):\n if not exists_table(connection, 'Connectors'):\n connection.execute(''' CREATE TABLE Connectors\n ( ConnectorID INTEGER PRIMARY KEY,\n Name TEXT NOT NULL UNIQUE,\n CurrentAmount INT NOT NULL,\n Series TEXT,\n Family TEXT,\n PairName TEXT,\n BoxAmount INT,\n CartonAmount INT,\n DateOrdered TEXT,\n ProductInfo TEXT,\n 
OfficeAmount INT,\n StorageAmount INT,\n SampleCase TEXT);''') # sample case name goes in SampleCase COL\n\n today = time.strftime('%d-%m-%y')\n todayTuple = (today,)\n connection.execute(\"INSERT INTO TableList (Name, DateModified) VALUES ('Connectors', ? );\", todayTuple)\n connection.commit()\n print('Table created successfully')\n return 'Connectors table created successfully'\n\n else:\n print('Table already exists')\n return \"Connectors table already exists\"\n\n\ndef createConnectorsHistoryTable(connection):\n if not exists_table(connection, 'ConnectorsHistory'):\n connection.execute(''' CREATE TABLE ConnectorsHistory\n (HistoryID INTEGER PRIMARY KEY,\n Name TEXT NOT NULL,\n Amount INT NOT NULL,\n Date TEXT NOT NULL,\n Difference INT,\n FOREIGN KEY(Name) REFERENCES Connectors(Name)); ''')\n\n today = time.strftime('%d-%m-%y')\n todayTuple = (today,)\n connection.execute(\"INSERT INTO TableList (Name, DateModified) VALUES ('ConnectorsHistory', ? );\", todayTuple)\n connection.commit()\n print(\"Table created successfully\")\n return 'ConnectorsHistory table created successfully'\n else:\n print('Table already exists')\n return 'ConnectorsHistory table already exists'\n\n\ndef createSampleCasesTable(connection):\n if not exists_table(connection, 'SampleCases'):\n connection.execute('''CREATE TABLE SampleCases\n ( SampleCaseID INTEGER PRIMARY KEY,\n Name TEXT NOT NULL UNIQUE,\n Amount INT,\n Date TEXT ); ''')\n\n today = time.strftime('%d-%m-%y')\n todayTuple = (today,)\n\n connection.execute(\"INSERT INTO TableList (Name, DateModified) VALUES ('SampleCases', ? );\", todayTuple)\n connection.commit()\n print('Table created successfully')\n return 'SampleCases table created successfully'\n else:\n print('Table already exists')\n return 'SampleCases table already exists'\n\n\ndef createSampleCasesHistoryTable(connection):\n if not exists_table(connection, 'SampleCasesHistory'):\n connection.execute(''' CREATE TABLE SampleCasesHistory\n (SampleCaseHistoryID INTEGER PRIMARY KEY,\n Name TEXT NOT NULL,\n Amount INT NOT NULL,\n Date TEXT NOT NULL,\n Difference INT,\n FOREIGN KEY(Name) REFERENCES SampleCases(Name)); ''')\n\n today = time.strftime('%d-%m-%y')\n todayTouple = (today,)\n connection.execute(\"INSERT INTO TableList (Name, DateModified) VALUES ('SampleCasesHistory', ? );\", todayTouple)\n connection.commit()\n print(\"Table created successfully\")\n return 'SampleCasesHistory table created successfully'\n else:\n print('Table already exists')\n return 'SampleCasesHistory table already exists'\n\n\ndef create_changelog_table(connection):\n if not exists_table(connection, 'Changelog'):\n connection.execute('''CREATE TABLE Changelog\n (ChangelogID INTEGER PRIMARY KEY,\n TableName TEXT NOT NULL,\n Statement TEXT NOT NULL,\n DATE TEXT NOT NULL,\n FOREIGN KEY (TableName) REFERENCES TableList(Name));''')\n\n today = time.strftime('%d-%m-%y')\n todayTouple = (today,)\n connection.execute(\"INSERT INTO TableList (Name, DateModified) VALUES ('Changelog', ? 
);\", todayTouple)\n connection.commit()\n print(\"Table created successfully\")\n return 'Changelog table created successfully'\n\n else:\n print('Table already exists')\n return 'Changelog table already exists'\n\n\ndef fillTable(dict, connection, tableName):\n\n \"\"\"\n Abstract using dict:\n create the following functions and call them in this function\n\n return {\n 'Connectors' : fill_Connectors_table(),\n 'ConnectorsHistory' : fill_ConnectorsHistory_table(),\n 'SampleCases': fill_SampleCases_table(),\n 'SampleCasesHistory' : fill_SampleCasesHistory_table()\n }[tableName]\n \"\"\"\n\n try:\n keys = list(dict.keys())\n\n keySTR = \", \".join(keys)\n keySTR = '(' + keySTR + ')'\n\n values = str(list(dict.values()))\n valuesSTR = values.replace(\"'\", '\"')\n valuesSTR = valuesSTR.replace('[', '(')\n valuesSTR = valuesSTR.replace(']', ')')\n\n # DIFFERENTITATES BETWEEN ' AND \" IN EXECUTE STATEMENT\n\n if tableName == 'Connectors':\n required_values = ('Name', 'CurrentAmount')\n entries = ('ConnectorID', 'Name', 'Series', 'CurrentAmount', 'Type', 'PairName', 'BoxAmount',\n 'CartonAmount', 'DateOrdered', 'ProductInfo', 'OfficeAmount', 'StorageAmount', 'SampleCase')\n\n if set(keys).issubset(set(entries)) and set(required_values).issubset(set(entries)):\n\n if connection.execute('''SELECT EXISTS\n (SELECT 1 FROM Connectors WHERE Name = ?)''', (dict['Name'],)).fetchone()[0]:\n\n insert_list = re.sub(r\"[\\[\\]);]|UNION\", \"\",\n str(list(zip(\n dict.keys(), dict.values()))).replace(\"',\", \"=\").replace(\"('\", \"\"),\n flags=re.IGNORECASE)\n\n today = time.strftime('%d-%m-%y')\n\n try:\n try:\n old_amount = connection.execute(\n \"SELECT CurrentAmount from Connectors WHERE Name = ?\", (dict['Name'],)).fetchall()[0][0]\n\n except OperationalError:\n old_amount = 0\n\n try:\n history_amount = connection.execute('''\n SELECT Amount\n FROM ConnectorsHistory\n WHERE Name = ?\n AND HistoryID =(SELECT MAX(HistoryID)\n FROM ConnectorsHistory\n WHERE Name = ?)''', (dict['Name'], dict['Name'])).fetchone()[0]\n except (OperationalError, TypeError):\n history_amount = 0\n\n difference = old_amount - history_amount\n\n entry_tuple = (dict['Name'], old_amount, today, difference,)\n\n connection.execute(\n '''INSERT INTO ConnectorsHistory (Name, Amount, Date, Difference) VALUES\n (?,?,?,?)''', entry_tuple)\n\n print('Records successfully added to connectors history table')\n # FIX\n connection.execute(\"UPDATE Connectors SET {0} WHERE Name = ?\".format(str(insert_list)),\n (dict['Name'],))\n print('Records successfully updated in connectors table.')\n\n return 'Records successfully added to History and Connectors table'\n\n except OperationalError:\n print('Operational Error, no records modified')\n return 'Operational Error, no records modified'\n\n else:\n try:\n connection.execute('INSERT INTO Connectors {0} VALUES {1}'.format(keySTR, valuesSTR))\n print('Records successfully added to connectors table.')\n\n return 'Records successfully added to Connectors table'\n except OperationalError:\n print('Operational Error, no records modified')\n return 'Operational Error no records modified'\n\n else:\n print(\"Invalid keys for update in Connectors table\")\n return 'Invalid keys for update in Connectors table'\n\n elif tableName == 'ConnectorsHistory':\n entries = ('Name', 'Amount', 'Date', 'Difference')\n\n if (set(entries) - set(keySTR) == set()) or (set(entries) - set(keySTR) == {'Difference'}):\n # REDO STATEMENT\n\n connection.exectute('INSERT INTO ConnectorsHistory ' + keySTR + 'VALUES ' + 
valuesSTR)\n\n if 'Difference' not in keySTR:\n try:\n old_amount = connection.execute('''\n SELECT Amount\n FROM ConnectorsHistory\n WHERE Name = ?\n AND HistoryID =(SELECT MAX(HistoryID)\n FROM ConnectorsHistory\n WHERE Name = ?)''', (dict['Name'], dict['Name'])).fetchall()[0][0]\n # VERIFY THIS STATEMENT WORKS\n except OperationalError:\n old_amount = 0\n connection.execute(\n 'UPDATE ConnectorsHistory SET Difference = {0} WHERE Name = {1}'.format(old_amount,\n dict['Name']))\n # TEST CONNECTORS HISTORY\n\n print('Records successfully added to ConnectorsHistory table.')\n return 'Records successfully added to Connectors History table'\n\n else:\n print('Invalid keys for update in ConnectorsHistory table')\n return 'Invalid keys for update in Connectors History table'\n\n elif tableName == 'SampleCases':\n entries = ('Name', 'Amount', 'Date')\n\n if set(keys).issubset(entries):\n\n if 'Date' not in dict.keys():\n dict['Date'] = time.strftime('%d-%m-%y')\n\n if connection.execute('''SELECT EXISTS\n (SELECT 1 FROM SampleCases WHERE Name = ?)''', (dict['Name'],)).fetchall()[0][0]:\n\n insert_list = re.sub(r\"[\\[\\]);]|UNION\", \"\",\n str(list(zip(\n dict.keys(), dict.values()))).replace(\"',\", \"=\").replace(\"('\", \"\"),\n flags=re.IGNORECASE)\n try:\n if connection.execute('''SELECT EXISTS\n (SELECT * FROM SampleCases WHERE Name = ?)''',\n (dict['Name'],)).fetchone()[0]:\n old_amount = connection.execute(\n \"SELECT Amount FROM SampleCases WHERE Name = ?\", (dict['Name'],)).fetchall()[0][0]\n\n else:\n\n old_amount = 0\n\n try:\n history_amount = connection.execute('''\n SELECT AMOUNT\n FROM SampleCasesHistory\n WHERE Name = ?\n AND SampleCaseHistoryID = (SELECT MAX(SampleCaseHistoryID\n FROM ConnectorsHistory\n WHERE Name = ?''',\n (dict['Name'], dict[\"Name\"])).fetchall()[0][0]\n except OperationalError:\n history_amount = 0\n\n difference = old_amount - history_amount\n\n entry_tuple = (dict['Name'], old_amount, dict['Date'], difference)\n\n connection.execute(\n \"INSERT INTO SampleCasesHistory (Name, Amount, Date, Difference) VALUES (?,?,?,?)\",\n entry_tuple)\n\n print(\"Records successfully added to sample cases history table\")\n\n connection.execute(\n \"UPDATE SampleCases SET {0} WHERE Name = ?\".format(str(insert_list)), (dict[\"Name\"],))\n\n connection.execute(\n '''UPDATE Connectors\n SET CurrentAmount = CurrentAmount - ?\n WHERE SampleCase = ?''', (dict['Amount'], dict['Name']))\n\n print(\"Records successfully added to the sample cases table\")\n\n return 'Records successfully added to SampleCases table, ' \\\n 'and Connector amounts successfully reduced'\n\n # FIX DECREMENT STATEMENT ON CONNECTORS\n # TEST SAMPLECASES\n\n except OperationalError:\n print('Operational Error, no records modified')\n return 'Operational Error, no records modified'\n else:\n try:\n connection.execute(\"INSERT INTO SampleCases {0} VALUES {1}\".format(keySTR, valuesSTR))\n connection.execute(\"\"\n \"UPDATE SampleCases \"\n \"SET Date = ? 
\"\n \"WHERE Name = ?\", (dict['Date'], dict[\"Name\"]))\n\n connection.execute(\n '''UPDATE Connectors\n SET CurrentAmount = CurrentAmount - ?\n WHERE SampleCase = ?''', (dict['Amount'], dict['Name']))\n\n return 'Records successfully added to SampleCases table, ' \\\n 'and Connector amounts successfully reduced'\n\n except OperationalError:\n\n print(\"Operational Error, no records modified\")\n return 'Operational Error, no records modified'\n else:\n print('Invalid keys for update in Sample Cases table')\n return 'Invalid keys for update in Samples Cases table'\n\n elif tableName == 'SampleCasesHistory':\n entries = ('Name', 'Amount', 'Date', 'Difference')\n\n if (set(entries) - set(keySTR) == set()) or (set(entries) - set(keySTR) == {'Difference'}):\n if 'Difference' not in keySTR:\n try:\n connection.execute('''SELECT Amount\n FROM SampleCasesHistory\n WHERE Name = ?\n AND HistoryID = (SELECT MAX(HistoryID)\n FROM SampleCasesHistory\n WHERE Name = ? ''', (dict['Name'], dict['Name']))\n\n except OperationalError:\n connection.execute('UPDATE SampleCasesHistory SET Difference = 0 WHERE Name = ' + dict['Name'])\n\n connection.exectute('INSERT INTO SampleCasesHistory ' + keySTR + 'VALUES ' + valuesSTR)\n print('Records successfully added to SampleCasesHistory table.')\n return 'Records successfully added to Sample Cases History table'\n\n else:\n print('Invalid keys for update in SampleCasesHistory table')\n return 'Invalid keys for update in Sample Cases History table'\n # TEST SAMPLECASESHISTORY\n else:\n print('Not a valid table')\n return 'Not a valid table'\n\n except AttributeError:\n pass\n\n\ndef exists_table(connection, table_name):\n query = \"SELECT 1 FROM sqlite_master WHERE type='table' and name = ?\"\n return connection.execute(query, (table_name,)).fetchone() is not None\n\n\ndef exists_column(connection, table_name, column_name):\n query = connection.execute(\"PRAGMA table_info({0})\".format(table_name))\n flag = False\n for item in query:\n if column_name in item:\n flag = True\n return flag\n\n\ndef query_table(connection, entry_list): # entry_list is column criteria value\n connection_object = 'Error (Check Capitalization)'\n if entry_list and exists_table(connection, entry_list[2]):\n if exists_column(connection, entry_list[2], entry_list[0]):\n entry_list[1] = ''.join(['*', entry_list[1], '*'])\n execute_tuple = (entry_list[1],)\n connection_object = connection.execute(\n \"SELECT * FROM {0} WHERE {1} GLOB ?\".format(entry_list[2], entry_list[0]), execute_tuple)\n return connection_object\n\n\ndef trim_TableList(connection):\n connection.execute('''\n DELETE FROM TableList\n WHERE TableListID NOT IN (\n SELECT MAX(TableListID)\n FROM TableList\n GROUP BY Name)''')\n connection.commit()\n\n\n# for test use:\n\ndb = createDB('Z:\\Inventory\\InventoryGUI\\inventory.db')\nconn_dict = {'ConnectorID': 1, 'Name': 'MUSBR', 'CurrentAmount': 10}\nhis_dict = {'Name': 'MUSBR', 'Amount': 25}\nsample_dict = {'Name': 'HARSH', 'Amount': 5}\n\n\n\"\"\"\nLEGACY FUNCTIONS\n\ndef view_values(connection_object):\n values = format_values(retrieve_values(connection_object))\n if values:\n return values\n else:\n return 'Table is empty'\n\ndef retrieve_values(connection_object):\n values_list = []\n\n for value in connection_object:\n values_list.append(value)\n return values_list\n\ndef has_values(connection, table):\n return bool(connection.execute('SELECT COUNT(*) FROM ' + table).fetchall()[0][0])\n\ndef format_values(values_list):\n characters = ['[', ']', '(', ')', \"'\", ',']\n\n 
values_str = str(values_list)\n for chars in characters:\n if chars in values_str:\n if chars == '(':\n values_str = values_str.replace(chars, '\\n ')\n else:\n values_str = values_str.replace(chars, \"\")\n\n return values_str\n\n\ndef query_headings(connection, table_name):\n try:\n table_headings = connection.execute(\"SELECT * FROM {0}\".format(table_name))\n return ' '.join(map(lambda x: x[0], table_headings.description)) + \" \\n\"\n except OperationalError:\n return 'Invalid: '\n\n\n\n\"\"\"\n","sub_path":"SQLinventory.py","file_name":"SQLinventory.py","file_ext":"py","file_size_in_byte":20902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"115458373","text":"import Anton as aen\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom scipy.stats import linregress as lin\n\ndef gcfunction(files, temp): # liefert nur die files mit einer bestimmten Temperatur\n gc = []\n for i in files:\n fname,name = os.path.split(i)\n gcn = name.split('_')[-1:]\n gcn = gcn[0][:-4]\n gcn = float(gcn)\n if gcn == temp:\n gc.append(i)\n return gc \n \ndef entry():\n evaluatin = input('>>> ')\n evi = evaluatin.split(';')\n times = [(i) for i in evi]\n times = [i.split(',') for i in times]\n times = [[float(j) for j in i] for i in times]\n return times\n \ndef seerawdata(files,temp):\n forty = gcfunction(files,temp)\n j = 1\n for i in forty:\n _,title = os.path.split(i)\n data = np.load(i)\n plt.subplot(4,3,j)\n plt.plot(data[0,:],data[1,:], '-ob')\n plt.grid()\n plt.title(title)\n j += 1\n figure = plt.gcf()\n figure.set_size_inches(20,17)\n plt.savefig(('%s.png'% title), dpi=200)\n plt.close(figure)\n return forty\n\ndef createSlopes(tempfiles, times):\n slopes = np.array([])\n j = 0\n for i in tempfiles:\n if times[j][0] != times[j][1]:\n path,title = os.path.split(i)\n data = np.load(i)\n start = np.where(data[0] >= times[j][0])[0][0]\n ende = np.where(data[0] <= times[j][1])[0][-1]\n x = data[0,start:ende]\n# print(np.max(data[1,start]))\n y = data[1,start:ende]/data[1,start] \n slope, intercept, r_value, p_value, slope_std_error = lin(x, y)\n print(slope)\n fx = intercept+ slope*x \n lab = ('%s %s - %s'% (title, str(data[0][start]),str(data[0][ende])))\n plt.plot(x,fx, label=lab)\n plt.legend()\n j += 1\n slopes = np.append(slopes,slope)\n else: j += 1\n name = os.path.split(tempfiles[0])[1].split('_')\n temp = ( '%.1f' % float(name[-1].split('.npy')[0])) \n name = ('%s_%s_%s_%s_Celsius' %(name[1],name[2],name[3],temp))\n file = os.path.join(os.path.split(os.path.split(tempfiles[0])[0])[0],name)\n plt.title('%s'% times)\n plt.show()\n np.save(file, slopes)\n return slopes\n \n#%%\npath = r'Z:\\2_Projekt__Permeabilitätsbeeinflussung\\02_Löslichkeitsuntersuchungen\\HS Microscope\\Experiments\\Final_results\\data\\1 - RIMR+KE60'\n\nfiles = aen.searchfiles(path, '.npy')\nfiles.sort()\n#data = np.load(files[0])\ntemp = 42.5\n\ntempfiles = seerawdata(files, temp)\ntimes = entry()\n\nslopes = createSlopes(tempfiles,times)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"2 - data2graph/evaluateData/readData.py","file_name":"readData.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159586047","text":"import random\n\nfrom database import db_query\n\n\nclass Responses:\n \"\"\"Class to save responses in memory on each start of the bot. 
This is done to remove extensive database querying\"\"\"\n\n responses = dict()\n\n @classmethod\n def collect(cls):\n #Done on startup\n data = db_query(\"select job_type, response_type, phrase from responses\")\n for job_type, response_type, text in data:\n cls.responses[(job_type, response_type)] = text\n\n @classmethod\n def get(cls, job_type: int, response_type: int) -> str:\n #Gets random response if it exists or returns empty string otherwise\n try:\n text = cls.responses[(job_type, response_type)]\n phrases = [x.strip() for x in text.split(\"\\n\") if len(x) != 1]\n\n #zero len list breaks random choice\n if len(phrases) == 0:\n return ''\n return random.choice(phrases)\n except KeyError:\n return ''\n\n @classmethod\n def get_entity(cls, job_type: int, response_type: int) -> str:\n #Gets the raw response text if it exists or returns empty string otherwise\n try:\n return cls.responses[(job_type, response_type)]\n except KeyError:\n return \"\"\n\n @classmethod\n def update(cls, job_type: int, response_type: int, text: str) -> None:\n #Runs after insertion of new responses to database\n cls.responses[(job_type, response_type)] = text\n","sub_path":"bot/responses.py","file_name":"responses.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"571276969","text":"import numpy as np\nimport pandas as pd\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey\n\nclass SqLiteDB:\n # Main DB Connection Ref Obj\n db_engine = None\n\n # Class members\n SQLITE = 'sqlite'\n\n # Table Names\n TABLE = 'users'\n\n # http://docs.sqlalchemy.org/en/latest/core/engines.html\n DB_ENGINE = {\n SQLITE: 'sqlite:///{DB}'\n }\n\n # constructor\n def __init__(self, dbtype, username='', password='', dbpath='', dbname=''):\n dbtype = dbtype.lower()\n if dbpath != '':\n dbname = dbpath+dbname\n \n if dbtype in self.DB_ENGINE.keys():\n engine_url = self.DB_ENGINE[dbtype].format(DB=dbname)\n self.db_engine = create_engine(engine_url)\n print(\"SQLite database: \"+str(self.db_engine))\n else:\n print(\"DBType is not found in DB_ENGINE\")\n \n # passes the query on to the provider\n def execute_query(self, query=''):\n if query == '' : return\n print (query)\n with self.db_engine.connect() as connection:\n try:\n connection.execute(query)\n except Exception as e:\n print(e)\n \n # get all table data\n def get_all_data(self, table='', query=''):\n query = query if query != '' else \"SELECT * FROM '{}';\".format(table)\n #print(query)\n with self.db_engine.connect() as connection:\n try:\n result = connection.execute(query)\n except Exception as e:\n print(e)\n else:\n #for row in result:\n # print(row) # print(row[0], row[1], row[2])\n result.close()\n print(\"\\n\")\n table_df = pd.read_sql_table(\n table,\n con=self.db_engine\n )\n return table_df\n ","sub_path":"Level1/dbManager.py","file_name":"dbManager.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"444587086","text":"#\n# CS 196 Data Hackerspace\n# Assignment 1: Data Parsing and NumPy\n# Due September 24th, 2018\n#\n\nimport json\nimport csv\nimport numpy as np\nimport pandas as pd\n\ndef histogram_times(filename):\n with open(filename) as f:\n csv_reader = csv.reader(f)\n data_list = list(csv_reader)\n time_column_number = 0\n output_list = np.zeros(24)\n for i in range(0, len(data_list[0])):\n if (data_list[0][i] == 'Time'):\n time_column_number = i\n for i in range(1,len(data_list)):\n if data_list[i][time_column_number]:\n time = \"\"\n count = 0\n for c in data_list[i][time_column_number]:\n if(not(c.isdigit()) and time != \"\"):\n break\n elif(c.isdigit()):\n time += c\n hours = int(time)\n if hours <= 23:\n output_list[hours] += 1\n return output_list\n\ndef weigh_pokemons(filename, weight):\n pokedex = pd.read_json(filename)\n pokedex = pd.read_json((pokedex['pokemon']).to_json(), orient = 'index')\n f = lambda x: float(x[0])\n pokedex.weight = pokedex.weight.str.split(\" \").apply(f)\n names = pokedex[pokedex.weight == weight].name.tolist()\n return names\n\ndef single_type_candy_count(filename):\n pokedex = pd.read_json(filename)\n pokedex = pd.read_json((pokedex['pokemon']).to_json(), orient = 'index')\n f = lambda x: len(x)\n pokedex.type = pokedex.type.apply(f)\n return pokedex[pokedex.type == 1].candy_count.sum()\n\ndef reflections_and_projections(points):\n if (len(points) != 2):\n return None\n points[1] = 1 - (points[1] - 1)\n rotater = np.array([[0,-1],[1,0]])\n points = np.dot(rotater, points)\n projecter = np.array([[1,3],[3,9]])\n points = 0.1 * np.dot(projecter, points)\n return points\n\ndef normalize(image):\n if (np.shape(image) != (32,32)):\n return None\n image = image.astype(float)\n max_value = np.max(image)\n min_value = np.min(image)\n image = (image - min_value) * (255 / (max_value - min_value))\n return image\n\n\ndef sigmoid_normalize(image, a):\n if (np.shape(image) != (32,32)):\n return None\n image = image.astype(float)\n image = 255 * ((1 + np.exp(- ((a ** (-1)) * (image - 128)))) ** (-1))\n return image","sub_path":"hw1.py","file_name":"hw1.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274730525","text":"import argparse\nfrom matplotlib import cm\nfrom utils import get_logger\nimport pandas as pd\nfrom utils import read_file_contents_list\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\n\nlogger = get_logger('Plot')\n\n\ndef main():\n parser = argparse.ArgumentParser('Plot box and scatter data.')\n parser.add_argument('--csv-data', type=str)\n parser.add_argument('--column', type=str)\n parser.add_argument('--out-fig', type=str)\n parser.add_argument('--outlier-list-lung-dice', type=str)\n parser.add_argument('--outlier-list-body-dice', type=str)\n parser.add_argument('--outlier-list-nmi', type=str)\n parser.add_argument('--outlier-list-manual', type=str)\n parser.add_argument('--thres-val', type=float)\n args = parser.parse_args()\n\n logger.info(f'Read csv: {args.csv_data}')\n data_df = pd.read_csv(args.csv_data)\n data_dict = data_df.set_index('Scan').to_dict()[args.column]\n # print(data_dict)\n outlier_list_lung_dice = read_file_contents_list(args.outlier_list_lung_dice)\n outlier_list_body_dice = read_file_contents_list(args.outlier_list_body_dice)\n outlier_list_nmi = read_file_contents_list(args.outlier_list_nmi)\n outlier_list_manual = read_file_contents_list(args.outlier_list_manual)\n\n outlier_items = [\n {\n 'outlier_list': outlier_list_lung_dice,\n 'idx': 2,\n 'color': 'red'\n },\n {\n 'outlier_list': outlier_list_body_dice,\n 'idx': 3,\n 'color': 'blue'\n },\n {\n 'outlier_list': outlier_list_nmi,\n 'idx': 4,\n 'color': 'green'\n },\n {\n 'outlier_list': outlier_list_manual,\n 'idx': 5,\n 'color': 'orange'\n }\n ]\n\n num_metric = 4\n y_all = data_df[args.column].to_numpy()\n y_all_table = 
np.zeros((len(y_all), num_metric+1))\n for i in range(num_metric+1):\n y_all_table[:, i] = y_all[:]\n x_all = np.random.normal(1, 0.01, len(y_all))\n\n fig, ax = plt.subplots()\n\n plt.boxplot(y_all_table)\n plt.scatter(x_all, y_all, c='grey', alpha=1)\n\n for outlier_item in outlier_items:\n x_outlier, y_outlier = get_x_y_outlier_list(data_dict,\n outlier_item['outlier_list'],\n outlier_item['idx'])\n plt.scatter(x_outlier, y_outlier, c=outlier_item['color'], alpha=0.5)\n\n labels = [item.get_text() for item in ax.get_xticklabels()]\n labels[0] = f'All ({len(y_all)})'\n labels[1] = f'Outliers (Lung, {len(outlier_list_lung_dice)}/{len(y_all)})'\n labels[2] = f'Outliers (Body, {len(outlier_list_body_dice)}/{len(y_all)})'\n labels[3] = f'Outliers (NMI, {len(outlier_list_nmi)}/{len(y_all)})'\n labels[4] = f'Outliers (Manual QA, {len(outlier_list_manual)}/{len(y_all)})'\n ax.set_xticklabels(labels)\n\n # Threshold.\n print(f'Thres: {args.thres_val}')\n plt.axhline(y=args.thres_val, color='r', linestyle='--')\n\n logger.info(f'Save plot to {args.out_fig}')\n fig.set_size_inches(14, 7.5)\n plt.savefig(args.out_fig)\n\n\ndef get_x_y_outlier_list(data_dict, outlier_list, x_idx):\n y_outlier = [data_dict[file_name] for file_name in outlier_list]\n x_outlier = np.random.normal(x_idx, 0.01, len(y_outlier))\n\n return x_outlier, y_outlier\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/get_combined_box_and_scatter.py","file_name":"get_combined_box_and_scatter.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"183775574","text":"# coding=utf-8\nimport sys \n\"\"\" \n1. We have an integer array where all the elements appear twice while only 1 element appears once. Please find that element. [Describe the steps directly, then switch to the following problem]\n2. We have an integer array where all the elements appear twice while only 2 elements appear once. Please find the 2 elements.\n\nIn the array exactly two numbers appear only once while all the other numbers appear twice; find those two numbers.\nUsing XOR is the simplest approach.\nThe interviewer asked not to use XOR; I came up with a quicksort-partition approach, which the interviewer accepted, though some details were off.\nAfter talking through the details, I started writing the code.\n\"\"\"\n# The code had a bug and was unfinished\n# Finished 2019/8/27 17:03\ndef findUnique(nums):\n ret = []\n left, right = 0, len(nums) - 1\n \n while True:\n boundary = quickHelper(nums, left, right)\n print(\"[debug] boundary: {}\".format(boundary))\n retL = handle(nums, left, boundary+1)\n retR = handle(nums, boundary+1, right+1)\n print(\"[debug] retL: {}, retR: {}\".format(retL, retR))\n # 1. left ret not equals 0\n if retL != 0:\n if retR != 0:\n ret.append(retL)\n ret.append(retR)\n break\n else:\n right = boundary\n # 2. right ret equals 0\n else: # left ret equals 0\n left = boundary\n \n return ret \n\ndef handle(nums, left, right):\n ret = 0\n for n in nums[left:right]:\n ret = ret ^ n \n \n return ret \n\ndef quickHelper(nums, left, right):\n mid = left + ((right - left) >> 1)\n nums[mid], nums[right] = nums[right], nums[mid]\n boundary = left \n for i in range(left, right):\n if nums[i] <= nums[right]:\n nums[i], nums[boundary] = nums[boundary], nums[i]\n boundary += 1\n \n nums[boundary], nums[right] = nums[right], nums[boundary] # exchange\n\n return boundary\n\ndef test():\n nums = [1,2,5,4,4,2,3,1]\n ret = findUnique(nums)\n print(ret)\n\nif __name__ == '__main__':\n test()","sub_path":"LeetCode/otherQuestion/MSTR/interview/findUnique.py","file_name":"findUnique.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"634441705","text":"import composite_behavior\nimport behavior\nimport skills.move\nimport constants\nimport math\nimport role_assignment\nimport robocup\nimport main\n\n\n## Robots position themselves along a portion of the circle centered at the ball\nclass CircleNearBall(composite_behavior.CompositeBehavior):\n\n BackupBallLocation = robocup.Point(0, constants.Field.Length / 4.0)\n\n def __init__(self):\n super().__init__(continuous=True)\n\n self.add_transition(behavior.Behavior.State.start,\n behavior.Behavior.State.running, lambda: True,\n 'immediately')\n self.add_transition(behavior.Behavior.State.running,\n behavior.Behavior.State.completed,\n lambda: self.all_subbehaviors_completed(),\n 'all robots reach target positions')\n self.add_transition(behavior.Behavior.State.completed,\n behavior.Behavior.State.running,\n lambda: not self.all_subbehaviors_completed(),\n \"robots aren't lined up\")\n\n i = 0\n for pt in self.get_circle_points(6):\n self.add_subbehavior(\n skills.move.Move(pt),\n name=\"robot\" + str(i),\n required=False,\n priority=6 - i)\n i = i + 1\n\n def get_circle_points(self, num_of_points):\n radius = constants.Field.CenterRadius + constants.Robot.Radius + 0.01\n ball_pos = main.ball().pos if main.ball() != None else robocup.Point(\n constants.Field.Width / 2, constants.Field.Length / 2)\n circle_ball = robocup.Circle(ball_pos, radius)\n\n intersection_points = []\n for i in constants.Field.FieldBorders:\n\n for j in circle_ball.intersects_line(i):\n # Using near_point because of rounding errors\n if constants.Field.FieldRect.near_point(j, 0.001):\n intersection_points.append(j)\n\n angles = []\n candidate_arcs = []\n if len(intersection_points) > 1:\n for i in intersection_points:\n new_angle = (i - circle_ball.center).angle()\n new_angle = self.normalize_angle(new_angle)\n angles.append(new_angle)\n\n # Get angles going sequentially\n angles.sort()\n\n counter = 1\n while counter < len(angles):\n candidate_arcs.append(robocup.Arc(circle_ball.center, radius,\n angles[counter - 1], angles[\n counter]))\n counter = counter + 1\n candidate_arcs.append(robocup.Arc(circle_ball.center, radius,\n angles[len(angles) - 1], angles[\n 0]))\n\n i = 0\n while i < len(candidate_arcs):\n angle_between = candidate_arcs[i].end() - candidate_arcs[\n i].start()\n angle_between = self.normalize_angle(angle_between)\n\n angle_diff = candidate_arcs[i].start() + (angle_between) / 2.0\n angle_diff = self.normalize_angle(angle_diff)\n\n midpoint = (\n candidate_arcs[i].center() + robocup.Point(radius, 0))\n midpoint.rotate(candidate_arcs[i].center(), angle_diff)\n if not 
constants.Field.FieldRect.contains_point(midpoint):\n candidate_arcs.pop(i)\n else:\n i = i + 1\n\n candidate_arcs.sort(\n key=lambda arc: self.normalize_angle(arc.end() - arc.start()),\n reverse=True)\n\n if len(candidate_arcs) <= 0:\n final_arc = robocup.Arc(CircleNearBall.BackupBallLocation,\n radius, math.pi / 2, 5 * math.pi / 2)\n else:\n final_arc = candidate_arcs[0]\n else:\n midpoint = (circle_ball.center + robocup.Point(radius, 0))\n if not constants.Field.FieldRect.contains_point(midpoint):\n final_arc = robocup.Arc(CircleNearBall.BackupBallLocation,\n radius, math.pi / 2, 5 * math.pi / 2)\n else:\n final_arc = robocup.Arc(circle_ball.center, radius,\n math.pi / 2, 5 * math.pi / 2)\n\n arc_angle = final_arc.end() - final_arc.start()\n arc_angle = self.normalize_angle(arc_angle)\n\n perRobot = arc_angle / (num_of_points + 1)\n\n dirvec = robocup.Point(radius, 0)\n dirvec.rotate(robocup.Point(0, 0), final_arc.start())\n dirvec.rotate(robocup.Point(0, 0), perRobot)\n\n final_points = []\n for i in range(num_of_points):\n pt = final_arc.center() + dirvec\n final_points.append(pt)\n dirvec.rotate(robocup.Point(0, 0), perRobot)\n\n return final_points\n\n def execute_completed(self):\n num_robots = 0\n for b in self.all_subbehaviors():\n if b.robot is not None:\n num_robots += 1\n\n i = 0\n for pt in self.get_circle_points(num_robots):\n self.subbehavior_with_name(\"robot\" + str(i)).pos = pt\n i = i + 1\n\n # set robot attributes\n for b in self.all_subbehaviors():\n if b.robot is not None:\n b.robot.set_avoid_ball_radius(constants.Field.CenterRadius)\n b.robot.face(main.ball().pos)\n\n # Makes an angle > 0, < pi * 2\n def normalize_angle(self, angle):\n # TODO make this O(1) and move to cpp\n while angle > math.pi * 2:\n angle = angle - math.pi * 2\n while angle < 0:\n angle = angle + math.pi * 2\n return angle\n\n def execute_running(self):\n num_robots = 0\n for b in self.all_subbehaviors():\n if b.robot is not None:\n num_robots += 1\n\n i = 0\n for pt in self.get_circle_points(num_robots):\n self.subbehavior_with_name(\"robot\" + str(i)).pos = pt\n i = i + 1\n\n # set robot attributes\n for b in self.all_subbehaviors():\n if b.robot is not None:\n b.robot.set_avoid_ball_radius(constants.Field.CenterRadius)\n b.robot.face(main.ball().pos)\n","sub_path":"soccer/gameplay/tactics/stopped/circle_near_ball.py","file_name":"circle_near_ball.py","file_ext":"py","file_size_in_byte":6444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"95232562","text":"#!/user/bin/python3\n\nimport datetime\nimport os\nimport re\nimport sys\nfrom remove_oldpkg import * \n\nclass Add_Entry: \n def __init__(self, usn_file,usn_tiks):\n self.usn_file = usn_file\n self.usn_tiks = usn_tiks\n \n def rem_dups(self, to_add):\n new_toadd = [] ; new_tiknums = []\n for x in to_add:\n if x not in new_toadd:\n new_toadd.append(x)\n #new_tiknums.append(tiknums[to_add.index(x)])\n return new_toadd\n\n\n def convert_date(self):\n months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n today = datetime.date.today()\n month = today.month; month = months[month]\n day = today.day; \n if day < 10: \n day = '0'+ str(day)\n year = str(today.year)\n date = str(month)+ ' ' + str(day) + ', ' + str(year)\n return date\n\n\n def get_tiknum(self, to_add):\n tiknums = [] ; findnum = False; i = 0\n with open(self.usn_tiks) as f: \n for x in to_add:\n for line in f:\n if not findnum:\n if x in line: \n findnum = True\n else: \n if 'NUMBER' in 
line:\n tiknum = line.split(':')[1]\n tiknum = tiknum.strip()\n tiknums.append(tiknum)\n if i < len(to_add):\n i = i + 1\n else: \n break\n return tiknums\n \n def reformat(self, x, tiknum):\n n = 0; x = ' \\''+x+'\\','\n sys.stdout.write(x)\n space_fill = 53 - len(x)\n date = Add_Entry.convert_date(self)\n newline = x +'#'.rjust(space_fill) + ' Ticket ' + tiknum + '; expires ' + date + '\\n'\n return newline \n\n\n def add_to_file(self, to_add, tiknums):\n temp_path = '/u/guinm/puppet/modules/internal/cecs/manifests/ubuntu/usn.pp.new'\n delim = ']:'\n if os.path.isfile(temp_path):\n os.remove(temp_path)\n with open(self.usn_file) as f: \n #read in until we find the end of the list\n for line in f: \n if delim in line and '#' not in line: \n #now we add in our packagenames, but in puppet format\n for x in to_add: \n try:\n tiknum = tiknums[to_add.index(x)]\n except Exception: \n print(\"Index out of range: continuing\")\n continue\n newline = Add_Entry.reformat(self, x, tiknum)\n print_to_file(newline, temp_path)\n print_to_file(line, temp_path)\n else:\n print_to_file(line, temp_path)\n f.close()\n os.rename(temp_path, self.usn_file)\n #add snot function to comp below this line \n\n\n def check_packages(self, packages, versions):\n try: \n chkcommand = 'dpkg -s ' + packages\n check = os.popen(chkcommand).read()\n if 'Status: install ok installed' not in check: \n print (packages + \" not installed, no need to upgrade\")\n return None\n else: \n print (packages + \" installed. Checking to see if upgrade required: \")\n versioncommand = chkcommand + \" | grep Version\"\n check = os.popen(versioncommand).read()\n check = check.split()[1]\n if check < versions:\n print (\"upgrade to \" + versions + \" may be needed.\") \n return packages\n else: \n print (\"No upgrade needed. 
\") \n return None\n except Exception:\n print (\"packagename could not be parsed \")\n\n def parse_multi(self, line):\n line = line.lstrip()\n pkgname = line.split()[0]\n version = line.split()[1]\n return pkgname, version\n\n def parse_oneline(self, line): \n pkgname = line.split(':',2)[-1]\n try: \n version = pkgname.split(' ',3)[2]\n except Exception:\n print(\"No version specified\")\n version = None\n try:\n pkgname = pkgname.split(' ',3)[1]\n except Exception:\n print (\"No packagename specified\")\n pkgname = None\n\n return pkgname, version\n\n def get_packagenames(self):\n packages =[]; versions = []; tik_numbers = []\n with open(self.usn_tiks, 'r') as f:\n for line in f:\n if 'ubuntu' in line and '.com' not in line and 'http' not in line and not 'list' in line:\n if re.match(r'\\S', line):\n pkgname, version = Add_Entry.parse_oneline(self, line)\n packages.append(pkgname)\n versions.append(version)\n elif not 'kernel' in line and 'firefox' not in line and 'nvidia' not in line and 'samba' not in line:\n lineidd = line.lstrip()\n pkgname, version = Add_Entry.parse_multi(self, line)\n packages.append(pkgname)\n versions.append(version)\n return packages, versions \n f.close()\n \n\n","sub_path":"add_newpkg.py","file_name":"add_newpkg.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"593625683","text":"import inspect\nimport random\nimport re\nfrom typing import Set, cast, get_args, get_origin\n\nfrom devtools import debug\nfrom pepperbot.exceptions import EventHandlerDefineError\nfrom pepperbot.globals import *\nfrom pepperbot.parse import (\n GROUP_EVENTS,\n GROUP_EVENTS_T,\n GroupEvent,\n is_valid_friend_method,\n is_valid_group_method,\n)\nfrom pepperbot.parse.bots import *\nfrom pepperbot.parse.kwargs import (\n DEFAULT_KWARGS,\n HANDLER_KWARGS_MAP,\n HandlerKwarg,\n construct_GroupCommonBot,\n)\nfrom pepperbot.utils.common import get_own_methods\n\n\ndef cache():\n # groupHandler可能有多个装饰器,比如register, withCommand\n # 先解析为与装饰器名称相关的缓存至groupMeta,\n # 解析完所有装饰器之后,再生成classHandlers.groupCache中的缓存\n # 生成缓存时,确保register的存在,不然报错(withCommand也可以向group中推送meta信息)\n # 这才是真正的meta,全局保存class和对应的meta,而不是绑定到class上,可能会涉及到bound和unbound的问题\n _cache = classHandlers.groupCache\n\n # 多个group handler,相同command的处理(解析所有指令和groupId,重新生成缓存)\n uniqueCommandClasses: Set[Callable] = set()\n\n for handlerClass, classMeta in classHandlers.groupMeta.items():\n\n # 检查事件响应的参数\n if not GLOBAL_CONFIG[\"TEST_MODE\"]:\n for method in get_own_methods(handlerClass):\n if is_valid_group_method(method.__name__):\n # before和after钩子的参数和正常响应相同\n handlerName: str = re.sub(r\"^before_\", \"\", method.__name__)\n handlerName = re.sub(r\"^after_\", \"\", method.__name__)\n handlerName = cast(GROUP_EVENTS_T, handlerName)\n\n kwargList: List[HandlerKwarg] = HANDLER_KWARGS_MAP.get(\n handlerName, DEFAULT_KWARGS\n )\n kwargList.append(\n HandlerKwarg(\n name=\"event\",\n type=Union[dict, Dict, Dict[str, Any]],\n value=None,\n )\n )\n\n kwargNameTypeMap = {}\n for kwarg in kwargList:\n kwargNameTypeMap[kwarg.name] = kwarg.type\n\n kwargNames = kwargNameTypeMap.keys()\n\n # debug(method.__name__)\n [args, varargs, varkw] = inspect.getargs(method.__code__)\n\n usableKwargsHint = \"\\n可用的参数及类型有\"\n kwargsLength = len(kwargNameTypeMap)\n for index, (kwargName, kwargType) in enumerate(\n kwargNameTypeMap.items(), start=1\n ):\n usableKwargsHint += f\"{kwargName}: {kwargType}\"\n\n if index != kwargsLength:\n usableKwargsHint += \", \"\n\n for 
{"seq_id":"593625683","text":"import inspect\nimport random\nimport re\nfrom typing import Set, cast, get_args, get_origin\n\nfrom devtools import debug\nfrom pepperbot.exceptions import EventHandlerDefineError\nfrom pepperbot.globals import *\nfrom pepperbot.parse import (\n GROUP_EVENTS,\n GROUP_EVENTS_T,\n GroupEvent,\n is_valid_friend_method,\n is_valid_group_method,\n)\nfrom pepperbot.parse.bots import *\nfrom pepperbot.parse.kwargs import (\n DEFAULT_KWARGS,\n HANDLER_KWARGS_MAP,\n HandlerKwarg,\n construct_GroupCommonBot,\n)\nfrom pepperbot.utils.common import get_own_methods\n\n\ndef cache():\n # A groupHandler may carry several decorators, e.g. register and withCommand.\n # Each decorator is first parsed into a cache keyed by decorator name in groupMeta;\n # once all decorators are parsed, the caches in classHandlers.groupCache are generated.\n # When generating the caches, make sure register exists, otherwise raise an error\n # (withCommand can also push meta information into the group).\n # This is the real meta: classes and their meta are kept globally rather than bound\n # to the class itself, which could run into bound/unbound method issues.\n _cache = classHandlers.groupCache\n\n # Several group handlers may share the same command (parse every command and\n # groupId, then regenerate the cache).\n uniqueCommandClasses: Set[Callable] = set()\n\n for handlerClass, classMeta in classHandlers.groupMeta.items():\n\n # validate the parameters declared by event handlers\n if not GLOBAL_CONFIG[\"TEST_MODE\"]:\n for method in get_own_methods(handlerClass):\n if is_valid_group_method(method.__name__):\n # before/after hooks take the same parameters as the plain handler\n handlerName: str = re.sub(r\"^before_\", \"\", method.__name__)\n handlerName = re.sub(r\"^after_\", \"\", handlerName)\n handlerName = cast(GROUP_EVENTS_T, handlerName)\n\n kwargList: List[HandlerKwarg] = HANDLER_KWARGS_MAP.get(\n handlerName, DEFAULT_KWARGS\n )\n kwargList.append(\n HandlerKwarg(\n name=\"event\",\n type=Union[dict, Dict, Dict[str, Any]],\n value=None,\n )\n )\n\n kwargNameTypeMap = {}\n for kwarg in kwargList:\n kwargNameTypeMap[kwarg.name] = kwarg.type\n\n kwargNames = kwargNameTypeMap.keys()\n\n # debug(method.__name__)\n [args, varargs, varkw] = inspect.getargs(method.__code__)\n\n usableKwargsHint = \"\\nAvailable parameters and their types: \"\n kwargsLength = len(kwargNameTypeMap)\n for index, (kwargName, kwargType) in enumerate(\n kwargNameTypeMap.items(), start=1\n ):\n usableKwargsHint += f\"{kwargName}: {kwargType}\"\n\n if index != kwargsLength:\n usableKwargsHint += \", \"\n\n for argName in args[1:]:\n if argName not in kwargNames:\n raise EventHandlerDefineError(\n f\"Class handler {handlerClass.__name__} in {inspect.getsourcefile(handlerClass)}: \"\n f\"event {method.__name__} has no parameter {argName}\" + usableKwargsHint\n )\n\n if argName not in method.__annotations__.keys():\n raise EventHandlerDefineError(\n f\"Class handler {handlerClass.__name__} in {inspect.getsourcefile(handlerClass)}: \"\n f\"parameter {argName} of event {method.__name__} has no type annotation; its type is {kwargNameTypeMap[argName]}\"\n + usableKwargsHint\n )\n\n if varargs or varkw:\n raise EventHandlerDefineError(\n f\"Class handler {handlerClass.__name__} in {inspect.getsourcefile(handlerClass)}: \"\n f\"event {method.__name__} must not declare * or ** parameters; PepperBot injects arguments automatically from the declared names and types\"\n + usableKwargsHint\n )\n\n # debug(method.__annotations__)\n\n for argName, argType in method.__annotations__.items():\n if argName not in kwargNames:\n raise EventHandlerDefineError(\n f\"Class handler {handlerClass.__name__} in {inspect.getsourcefile(handlerClass)}: \"\n f\"event {method.__name__} has no parameter {argName}\" + usableKwargsHint\n )\n\n kwargType = kwargNameTypeMap[argName]\n\n wrongTypeFlag = True\n if get_origin(kwargType) is Union:\n for _type in get_args(kwargType):\n if _type == argType:\n wrongTypeFlag = False\n\n if kwargType == argType:\n wrongTypeFlag = False\n\n if wrongTypeFlag:\n raise EventHandlerDefineError(\n f\"Class handler {handlerClass.__name__} in {inspect.getsourcefile(handlerClass)}:\\n\"\n + f\"parameter {argName} of event {method.__name__} should be of type {kwargType}, not {argType}\"\n )\n\n # Do not build an instance on every message; create it once when an on/register\n # listener is added and keep it around.\n _instance = handlerClass()\n\n decoratorNames = list(classMeta.decorators.keys())\n if \"register\" not in decoratorNames:\n raise Exception(\"Class-based event handlers must be registered with the register decorator\")\n\n currentCommandClasses: List[Callable] = []\n\n # class-level decorators other than register\n decoratorNames.remove(\"register\")\n\n for decoratorName in decoratorNames:\n argDict = classMeta.decorators[decoratorName]\n args = argDict.args\n kwargs = argDict.kwargs\n\n if decoratorName == \"with_command\":\n commandClasses: List[Callable] = kwargs[\"commandClasses\"]\n\n currentCommandClasses = [*commandClasses]\n\n for commandClass in commandClasses:\n uniqueCommandClasses.add(commandClass)\n\n # elif decoratorName == \"\"\n\n # After every other decorator is handled, process register last and build the\n # groupId -> classHandler mapping.\n finalGroupIds: List[int] = []\n\n groupId = classMeta.decorators[\"register\"].kwargs[\"groupId\"]\n\n if isinstance(groupId, int):\n finalGroupIds.append(groupId)\n\n elif isinstance(groupId, str):\n finalGroupIds.append(int(groupId))\n\n elif isinstance(groupId, list):\n for id in groupId:\n finalGroupIds.append(int(id))\n\n for id in finalGroupIds:\n\n # Could botInstance be instantiated just once, with the groupId injected dynamically?\n # One bot per group seems to behave better.\n groupCache = GroupCache(\n instance=_instance,\n # cache one GroupCommonBot for every group\n botInstance=construct_GroupCommonBot({\"group_id\": id}, cast(Any, None)),\n commandClasses=currentCommandClasses,\n )\n\n # Build the cache once at initialisation; do not walk the methods again for\n # every incoming message.\n for method in get_own_methods(_instance):\n if is_valid_group_method(method.__name__):\n groupCache.methods[method.__name__].append(method)\n\n else:\n print(\"invalid hook\")\n debug(method)\n\n _cache[id].append(groupCache)\n\n for method in get_own_methods(_instance):\n if is_valid_friend_method(method.__name__):\n\n friendCache = FriendCache(\n instance=_instance,\n # commandClasses=currentCommandClasses,\n )\n\n friendCache.methods[method.__name__].append(method)\n classHandlers.friendCache.append(friendCache)\n\n elif GroupEvent.temp_message == method.__name__:\n\n tempCache = FriendCache(\n instance=_instance,\n )\n\n tempCache.methods[method.__name__].append(method)\n classHandlers.tempCache.append(tempCache)\n\n else:\n print(\"invalid hook\")\n debug(method)\n\n # Runs after every possible with_command decorator has been collected.\n # Each commandClass is instantiated exactly once.\n for commandClass in uniqueCommandClasses:\n\n commandInstance = commandClass()\n\n commandKwargs: Dict = getattr(commandClass, \"kwargs\")\n # debug(commandKwargs)\n\n commandBuffer = CommandCache(instance=commandInstance, kwargs=commandKwargs)\n\n for method in get_own_methods(commandInstance):\n commandBuffer.methods[method.__name__] = method\n\n classHandlers.commandCache[commandClass] = commandBuffer\n\n maxSize = commandKwargs[\"maxSize\"]\n timeout = commandKwargs[\"timeout\"]\n mode = commandKwargs[\"mode\"]\n\n commandContext = CommandContext(\n maxSize=maxSize or globalContext.maxSize,\n timeout=timeout or globalContext.timeout,\n mode=mode,\n )\n\n globalContext.cache[commandClass] = commandContext\n","sub_path":"pepperbot/parse/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":9728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
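# The validation loop in cache() above accepts an annotation when it equals the
# expected type or any member of an expected Union. A self-contained sketch of
# that rule (annotation_matches is a hypothetical helper, not PepperBot API):
from typing import Any, Dict, Union, get_args, get_origin

def annotation_matches(expected, actual):
    # a Union matches when any of its members equals the declared type
    if get_origin(expected) is Union:
        return any(member == actual for member in get_args(expected))
    return expected == actual

assert annotation_matches(Union[dict, Dict[str, Any]], dict)
assert not annotation_matches(Union[int, str], float)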
{"seq_id":"634072157","text":"import os\nimport os.path\n\nnand_dff_count = {}\n\ndef get_hdl_files():\n hdl_files = {}\n for dirname, _, filenames in os.walk('.'):\n for filename in filenames:\n chip, ext = os.path.splitext(filename)\n if ext == '.hdl':\n hdl_files[chip] = os.path.join(dirname, filename)\n return hdl_files\n\ndef parse_chip(chip_name, hdl_files):\n f = open(hdl_files[chip_name])\n in_parts = False\n subchips = []\n for line in f.readlines():\n line = line.strip()\n if 'PARTS' in line:\n in_parts = True\n continue\n if in_parts:\n idx = line.find('(')\n if idx != -1:\n chip = line[:idx]\n subchips.append(chip)\n f.close()\n\n return subchips\n\ndef count_gates_in_chip(chip_name, hdl_files):\n if chip_name == 'Nand':\n return (1, 0)\n if chip_name == 'DFF':\n return (0, 1)\n\n nand_total = 0\n dff_total = 0\n\n for subchip in parse_chip(chip_name, hdl_files):\n if subchip in nand_dff_count:\n nand, dff = nand_dff_count[subchip]\n else:\n nand, dff = count_gates_in_chip(subchip, hdl_files)\n nand_dff_count[subchip] = (nand, dff)\n\n nand_total += nand\n dff_total += dff\n\n return (nand_total, dff_total)\n\n\nif __name__ == '__main__':\n hdl_files = get_hdl_files()\n for chip in hdl_files:\n nand, dff = count_gates_in_chip(chip, hdl_files)\n print(f'{chip}\\t\\t\\t\\t\\t{nand}\\t\\t\\t\\t\\t{dff}')\n\n","sub_path":"gate_counter.py","file_name":"gate_counter.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"81379829","text":"\n\nclass knapsack() :\n\n\n def __init__(self,weight,value,W):\n\n self.weight=weight\n self.n=len(weight)\n self.value=value\n self.maxval=0\n self.W=W\n\n\n def find(self,W,k,curValue) :\n if W>=0 :\n if k>=self.n :\n if self.maxval 5:\r\n self.logger.info('overlay not found')\r\n return False\r\n except:\r\n self.wait_for_loading()\r\n\r\n def get_titlebar_title(self):\r\n self.logger.info('Titlebar title: {}'.format(self._get_element(\"com.telvent.weathersentry:id/titlebar_title\").text))\r\n return self._get_element(\"com.telvent.weathersentry:id/titlebar_title\").text\r\n\r\n def titlebar_submit(self):\r\n self.click(\"com.telvent.weathersentry:id/titlebar_right_imagebutton\")\r\n #\r\n self.wait_for_loading()\r\n #\r\n self.logger.info('clicked SUBMIT button on titlebar')\r\n\r\n def titlebar_back(self):\r\n
self.click(\"com.telvent.weathersentry:id/titlebar_left_imagebutton\")\r\n #\r\n self.wait_for_loading()\r\n #\r\n self.logger.info('clicked Back button on titlebar')\r\n\r\n def clear(self, id):\r\n self._get_element(id).clear()\r\n self.logger.info('cleared text field: {}'.format(id))\r\n\r\n def type(self, id, text):\r\n self.clear(id)\r\n self._get_element(id).send_keys(text)\r\n self.logger.info('typed text: {} into text field: {}'.format(text, id))\r\n\r\n def close_alert_and_get_text(self):\r\n self.wait_for_loading()\r\n i=0\r\n try:\r\n if self.is_element_present(\"android:id/button1\"):\r\n alert_text = self.get_text(\"android:id/message\")\r\n self.accept_android_overlay_notification()\r\n self.logger.info('found alert with text: {}'.format(alert_text))\r\n return alert_text\r\n elif i > 5:\r\n self.logger.info('Did not find alert')\r\n return False\r\n else:\r\n i+=1\r\n except NoSuchElementException:\r\n i+=1\r\n time.sleep(1)\r\n\r\n def wait_for_loading(self):\r\n time.sleep(1)\r\n try:\r\n for i in range(5):\r\n if self.is_element_present('new UiSelector().text(\"Loading\")', ui_auto=True):\r\n time.sleep(2)\r\n else:\r\n return True\r\n return False\r\n except NoSuchElementException:\r\n return True\r\n except StaleElementReferenceException:\r\n return True\r\n\r\n\r\n\r\n # def check_saving_criteria(self, element, action):\r\n #TODO: click on criteria, get state, perform action, save, confirm message, re-enter, verify saved, uncheck, save again\r\n\r\n def compare_lists(self, expected, actual):\r\n results =[]\r\n for e in expected:\r\n if e in actual:\r\n pass\r\n else:\r\n results.append('did not find in actual list: '+format(e))\r\n for a in actual:\r\n if a in expected:\r\n pass\r\n else:\r\n results.append('did not find in expected list: '+format(a))\r\n if len(results) == 0:\r\n return True\r\n else:\r\n return False\r\n\r\n def zoom(self):\r\n element = self._get_element('com.telvent.weathersentry:id/map_mapview').size\r\n xx = element['width'] / 2\r\n yy = element['height'] / 2\r\n print('xx:', xx)\r\n print('yy:', yy)\r\n a1 = TouchAction(self.driver)\r\n a2 = TouchAction(self.driver)\r\n ma = MultiAction(self.driver)\r\n a1.press(x=xx-20, y=yy-20).move_to(x=-10, y=-10).wait(1000).release()\r\n a2.press(x=xx+20, y=yy+20).move_to(x=+10, y=+10).wait(1000).release()\r\n ma.add(a1, a2)\r\n ma.perform()\r\n\r\n def zoom2(self, to_loc):\r\n window_size = self.driver.get_window_size()\r\n xx = window_size[\"width\"] / 2\r\n yy = window_size[\"height\"] / 2\r\n to = abs(to_loc - yy)\r\n action1 = TouchAction(self.driver)\r\n action1.press(x=200, y=205)\r\n action1_1 = TouchAction(self.driver)\r\n action1_1.move_to(x=0, y=10)\r\n action1_2 = TouchAction(self.driver)\r\n action1_2.wait(200).release()\r\n\r\n action2 = TouchAction(self.driver)\r\n action1.press(x=200, y=195)\r\n action2_1 = TouchAction(self.driver)\r\n action2_1.move_to(x=0, y=-10)\r\n action2_2 = TouchAction(self.driver)\r\n action2_2.wait(200).release()\r\n\r\n m_action = MultiAction(self.driver)\r\n m_action.add(action1, action2)\r\n m_action.perform()\r\n m_action2 = MultiAction(self.driver)\r\n m_action2.add(action1_1, action2_1)\r\n m_action2.perform()\r\n m_action3 = MultiAction(self.driver)\r\n m_action3.add(action1_2, action2_2)\r\n m_action3.perform()\r\n\r\n def scroll_up(driver):\r\n height = driver._get_element('android:id/content').size['height']\r\n y = height / 3\r\n driver.driver.swipe(100, y-(y*.55), 100, y+(y*1.2), 500)\r\n\r\n def scroll_up_a_little(driver):\r\n height = 
driver._get_element('android:id/content').size['height']\r\n y = height / 5\r\n driver.driver.swipe(100, y - (y * .75), 100, y + (y * .75), 500)\r\n\r\n def scroll_down(driver):\r\n height = driver._get_element('android:id/content').size['height']\r\n y = height / 2\r\n driver.driver.swipe(100, y+(y*.90), 100, y-(y*.95), 500)\r\n\r\n def scroll_down_a_little(driver):\r\n height = driver._get_element('android:id/content').size['height']\r\n y = height / 5\r\n driver.driver.swipe(100, y + (y * .75), 100, y - (y * .75), 500)\r\n\r\n def back_out_to_main_page(self):\r\n try:\r\n for i in range(7): # keep backing out until back at main menu\r\n try:\r\n if not self.is_element_present('com.telvent.weathersentry:id/home_screen_maps'):\r\n self.logger.info(\"driver back = {}\".format(i))\r\n self.driver.back()\r\n time.sleep(1)\r\n else:\r\n self.logger.info(\"Found Home Page\")\r\n break\r\n except:\r\n self.logger.info(\"driver back = {}\".format(i))\r\n self.driver.back()\r\n time.sleep(1)\r\n if not self.is_element_present(\"com.telvent.weathersentry:id/titlebar_ll\"):\r\n print('launching app from backout to main page')\r\n self.driver.launch_app()\r\n time.sleep(1)\r\n except:\r\n self.driver.background_app(1)\r\n time.sleep(1)\r\n\r\n def compare_screens(self, layer1, layer2, noDelete=''):\r\n from PIL import Image\r\n import math\r\n import operator\r\n import sys\r\n import functools\r\n self.tempImageCompare = os.path.join(sys.path[0] + \"\\\\data\\\\TempImageCompare\")\r\n i1 = Image.open(self.tempImageCompare + \"\\\\\" + layer1 + \".png\").histogram()\r\n i2 = Image.open(self.tempImageCompare + \"\\\\\" + layer2 + \".png\").histogram()\r\n rms = math.sqrt(functools.reduce(operator.add,\r\n map(lambda a, b: (a - b) ** 2, i1, i2)) / len(i1))\r\n self.logger.info('layer {} compred to layer {} compare score= {}'.format(layer1,layer2, rms))","sub_path":"android/core/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":11977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"339905200","text":"import logging\nimport os\n\nfrom src import android, appium, log\n\nlogger = logging.getLogger('service')\n\n# not using enum because need to install pip that will make docker image size bigger\nTYPE_ARMEABI = 'armeabi'\nTYPE_X86 = 'x86'\n\n\ndef start():\n \"\"\"\n Installation of needed sdk package, creation of android emulator and execution of appium server.\n\n \"\"\"\n # Get all needed environment variables\n android_path = os.getenv('ANDROID_HOME', '/root')\n logger.info('Android path: {path}'.format(path=android_path))\n emulator_type = os.getenv('EMULATOR_TYPE', TYPE_ARMEABI).lower()\n logger.info('Emulator type: {type}'.format(type=emulator_type))\n android_version = os.getenv('ANDROID_VERSION', '4.2.2')\n logger.info('Android version: {version}'.format(version=android_version))\n connect_to_grid = str_to_bool(str(os.getenv('CONNECT_TO_GRID', False)))\n logger.info('Connect to selenium grid? 
{input}'.format(input=connect_to_grid))\n\n # Install needed sdk packages\n emulator_type = TYPE_ARMEABI if emulator_type not in [TYPE_ARMEABI, TYPE_X86] else emulator_type\n emulator_file = 'emulator64-x86' if emulator_type == TYPE_X86 else 'emulator64-arm'\n logger.info('Emulator file: {file}'.format(file=emulator_file))\n api_level = android.get_api_level(android_version)\n sys_img = 'x86_64' if emulator_type == TYPE_X86 else 'armeabi-v7a'\n logger.info('System image: {sys_img}'.format(sys_img=sys_img))\n android.install_package(android_path, emulator_file, api_level, sys_img)\n\n # Create android virtual device\n device_name = os.getenv('DEVICE', 'Nexus 5')\n logger.info('Device: {device}'.format(device=device_name))\n skin_name = device_name.replace(' ', '_').lower()\n logger.info('Skin: {skin}'.format(skin=skin_name))\n avd_name = '{device}_{version}'.format(device=skin_name, version=android_version)\n logger.info('AVD name: {avd}'.format(avd=avd_name))\n android.create_avd(android_path, device_name, skin_name, avd_name, api_level)\n\n # Run appium server\n appium.run(connect_to_grid, avd_name, android_version)\n\n\ndef str_to_bool(value):\n \"\"\"\n Convert string to boolean.\n\n :param value: given string\n :type value: str\n :return: converted boolean\n :rtype: bool\n \"\"\"\n return value.lower() in ('yes', 'true', 't', '1')\n\nif __name__ == '__main__':\n log.init()\n start()\n","sub_path":"src/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
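# Quick behavioural check of str_to_bool from the record above; the accepted
# truthy spellings mirror distutils.util.strtobool, but a bool comes back directly:
assert str_to_bool('Yes') is True
assert str_to_bool('t') is True
assert str_to_bool('0') is False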
{"seq_id":"588995286","text":"#!/usr/bin/python3\n\"\"\"\n Test for class FileStorage\n\"\"\"\nimport unittest\nimport pep8\nimport inspect\nfrom models.engine import file_storage\nfrom models.engine.file_storage import FileStorage\nimport os\n\n\nclass TestPep8B(unittest.TestCase):\n \"\"\" Check for pep8 validation. \"\"\"\n def test_pep8(self):\n \"\"\" test base and test_base for pep8 conformance \"\"\"\n style = pep8.StyleGuide(quiet=True)\n file1 = 'models/engine/file_storage.py'\n file2 = 'tests/test_models/test_engine/test_file_storage.py'\n result = style.check_files([file1, file2])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warning).\")\n\n\nclass TestDocsB(unittest.TestCase):\n \"\"\" Check for documentation. \"\"\"\n def test_module_doc(self):\n \"\"\" check for module documentation \"\"\"\n self.assertTrue(len(file_storage.__doc__) > 0)\n\n def test_class_doc(self):\n \"\"\" Check for documentation \"\"\"\n self.assertTrue(len(FileStorage.__doc__) > 0)\n\n def test_method_docs(self):\n \"\"\" Check for method documentation \"\"\"\n # dir() yields attribute names (strings), so resolve each name to the\n # actual member before checking its docstring\n for name, func in inspect.getmembers(FileStorage, inspect.isfunction):\n self.assertTrue(len(func.__doc__) > 0)\n\n\nclass TestFileStorage(unittest.TestCase):\n \"\"\"Class FileStorage \"\"\"\n\n def check_instance(self):\n \"\"\" Check the existence of instance \"\"\"\n self.assertIsInstance(self.storage_1, FileStorage)\n\n def test_permissions(self):\n \"\"\"test read-write-execute permissions\"\"\"\n read = os.access('models/engine/file_storage.py', os.R_OK)\n self.assertTrue(read)\n write = os.access('models/engine/file_storage.py', os.W_OK)\n self.assertTrue(write)\n exe = os.access('models/engine/file_storage.py', os.X_OK)\n self.assertTrue(exe)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_models/test_engine/test_file_storage.py","file_name":"test_file_storage.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"100240691","text":"# -*- coding: utf-8 -*-\n########################################cifar100############################\nimport pickle as p\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as plimg\nfrom PIL import Image\nimport os\n \ndef load_CIFAR_batch(filename, folder_name):\n \"\"\" load single batch of cifar \"\"\"\n with open(filename, 'rb')as f:\n datadict = p.load(f)\n \"\"\" \n cifar100 data content: \n { \n \"coarse_labels\":[0,...,19], # 0~19 super category \n \"filenames\":[\"volcano_s_000012.png\",...], \n \"batch_label\":\"\", \n \"fine_labels\":[0,1...99] # 0~99 category \n } \n return list of numpy arrays [na,...,na] with specific batch_size \n na: N dimensional numpy array \n \"\"\" \n # print(datadict.keys())\n ## ['data', 'batch_label', 'fine_labels', 'coarse_labels', 'filenames']\n \n batch_label = datadict['batch_label']\n fine_labels = datadict['fine_labels']\n coarse_labels = datadict['coarse_labels']\n batch_label = np.array(batch_label)\n fine_labels = np.array(fine_labels)\n coarse_labels = np.array(coarse_labels)\n \n X = datadict['data']\n # print(X.shape)\n if(folder_name == \"train\"):\n X = X.reshape(50000, 3, 32, 32)\n elif(folder_name == \"validation\"):\n X = X.reshape(10000, 3, 32, 32)\n \n return X, batch_label, fine_labels, coarse_labels\n \ndef unpickle(file): \n import cPickle\n fo = open(file, 'rb')\n dict = cPickle.load(fo)\n fo.close()\n return dict\n \n \nfilename = \"./cifar-100-python/meta\"\ndict_meta_batch = unpickle(filename)\n \ndef get_class_name(lable_id):\n \n fine_label_names_list = dict_meta_batch['fine_label_names']\n class_name = fine_label_names_list[lable_id]\n \n return class_name\n \ndef create_dir(dir_path):\n if not os.path.exists(dir_path):\n print(\"Create dir = {}\".format(dir_path))\n os.makedirs(dir_path)\n \ntrain_img_num = 0\ntest_img_num = 0\ndef visualize_data(binary_img_path_list, folder_name):\n global train_img_num\n global test_img_num\n for item in binary_img_path_list:\n # imgX, imgY = load_CIFAR_batch(item)\n imgX, batch_label, fine_labels, coarse_labels = load_CIFAR_batch(item, folder_name)\n print(\"image [{}] saving...\".format(item))\n xx = 0\n for i in xrange(imgX.shape[0]):\n imgs = imgX[i - 1]\n # print(\"fine_labels = {}\".format(fine_labels[i-1]))\n
img0, img1, img2 = imgs[0], imgs[1], imgs[2]\n i0 = Image.fromarray(img0)\n i1 = Image.fromarray(img1)\n i2 = Image.fromarray(img2)\n img = Image.merge(\"RGB\",(i0,i1,i2))\n \n class_name = get_class_name(fine_labels[i-1])\n print(\"class_name = {}\".format(class_name))\n \n if(folder_name == \"train\"):\n name = class_name + \"_\" + str(train_img_num) + \".png\"\n else:\n name = class_name + \"_\" + str(test_img_num) + \".png\"\n \n save_path = os.path.join(folder_name, class_name, name)\n create_dir(os.path.join(folder_name, class_name))\n img.save(save_path, \"png\")\n \n train_img_num += 1\n test_img_num += 1\n \n # xx += 1\n # if(xx>10):\n # break\n \n \nif __name__ == \"__main__\":\n \n binary_img_path_list = []\n binary_img_path_list.append(\"./cifar-100-python/train\")\n visualize_data(binary_img_path_list, \"train\")\n \n test_binary_img_path_list = []\n test_binary_img_path_list.append(\"./cifar-100-python/test\")\n visualize_data(test_binary_img_path_list, \"validation\")\n\n########################################cifar10############################\n# -*- coding: utf-8 -*-\n \nimport pickle as p\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.image as plimg\nfrom PIL import Image\nimport os\n \ndef load_CIFAR_batch(filename):\n \"\"\" load single batch of cifar \"\"\"\n with open(filename, 'rb')as f:\n datadict = p.load(f)\n X = datadict['data']\n Y = datadict['labels']\n X = X.reshape(10000, 3, 32, 32)\n Y = np.array(Y)\n return X, Y\n \ndef load_CIFAR_Labels(filename):\n with open(filename, 'rb') as f:\n lines = [x for x in f.readlines()]\n print(lines)\n \ndef get_class_name(lable_id):\n if(lable_id==\"0\"):\n class_name = \"airplane\"\n elif(lable_id==\"1\"):\n class_name = \"automobile\"\n \n elif(lable_id==\"2\"):\n class_name = \"bird\"\n \n elif(lable_id==\"3\"):\n class_name = \"cat\"\n \n elif(lable_id==\"4\"):\n class_name = \"deer\"\n \n elif(lable_id==\"5\"):\n class_name = \"dog\"\n \n elif(lable_id==\"6\"):\n class_name = \"frog\"\n \n elif(lable_id==\"7\"):\n class_name = \"horse\"\n \n elif(lable_id==\"8\"):\n class_name = \"ship\"\n \n elif(lable_id==\"9\"):\n class_name = \"truck\"\n \n return class_name\n \ndef create_dir(dir_path):\n if not os.path.exists(dir_path):\n print(\"Create dir = {}\".format(dir_path))\n os.makedirs(dir_path)\n \ntrain_img_num = 0\ntest_img_num = 0\ndef visualize_data(binary_img_path_list, folder_name):\n global train_img_num\n global test_img_num\n for item in binary_img_path_list:\n imgX, imgY = load_CIFAR_batch(item)\n print(\"image [{}] saving...\".format(item))\n xx = 0\n for i in xrange(imgX.shape[0]):\n imgs = imgX[i - 1]\n print(\"imgY = {}\".format(imgY[i-1]))\n img0, img1, img2 = imgs[0], imgs[1], imgs[2]\n i0 = Image.fromarray(img0)\n i1 = Image.fromarray(img1)\n i2 = Image.fromarray(img2)\n img = Image.merge(\"RGB\",(i0,i1,i2))\n \n \n if(folder_name == \"train\"):\n name = folder_name + \"_\" + str(train_img_num) + \".png\"\n else:\n name = folder_name + \"_\" + str(test_img_num) + \".png\"\n \n class_name = get_class_name(str(imgY[i-1]))\n save_path = os.path.join(folder_name, class_name, name)\n create_dir(os.path.join(folder_name, class_name))\n img.save(save_path, \"png\")\n \n train_img_num += 1\n test_img_num += 1\n \n # xx += 1\n # if(xx>10):\n # break\n \n \nif __name__ == \"__main__\":\n # load_CIFAR_Labels(\"./cifar-10-batches-py/batches.meta\")\n \n binary_img_path_list = []\n binary_img_path_list.append(\"./cifar-10-batches-py/data_batch_1\")\n 
binary_img_path_list.append(\"./cifar-10-batches-py/data_batch_2\")\n binary_img_path_list.append(\"./cifar-10-batches-py/data_batch_3\")\n binary_img_path_list.append(\"./cifar-10-batches-py/data_batch_4\")\n binary_img_path_list.append(\"./cifar-10-batches-py/data_batch_5\")\n \n visualize_data(binary_img_path_list, \"train\")\n \n test_binary_img_path_list = []\n test_binary_img_path_list.append(\"./cifar-10-batches-py/test_batch\")\n visualize_data(test_binary_img_path_list, \"validation\")\n\n########################################mnist############################\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n \n \nfrom PIL import Image\nimport struct\n \n \ndef read_image(filename):\n f = open(filename, 'rb')\n index = 0\n buf = f.read()\n f.close()\n \n magic, images, rows, columns = struct.unpack_from('>IIII' , buf , index)\n index += struct.calcsize('>IIII')\n \n # xrange has become range in python 3\n for i in range(images):\n # for i in xrange(2000):\n # 创建一张空白的图片,其中的’L’代表这张图片是灰度图\n image = Image.new('L', (columns, rows))\n for x in range(rows):\n for y in range(columns):\n image.putpixel((y, x), int(struct.unpack_from('>B', buf, index)[0]))\n index += struct.calcsize('>B')\n \n print('save ' + str(i) + ' image')\n image.save('test_data/' + str(i) + '.png')\n \n \ndef read_label(filename, saveFilename):\n f = open(filename, 'rb')\n index = 0\n buf = f.read()\n \n f.close()\n \n magic, labels = struct.unpack_from('>II' , buf , index)\n index += struct.calcsize('>II')\n \n labelArr = [0] * labels\n #labelArr = [0] * 2000\n \n \n for x in range(labels):\n #for x in xrange(2000):\n labelArr[x] = int(struct.unpack_from('>B', buf, index)[0])\n index += struct.calcsize('>B')\n \n save = open(saveFilename, 'w')\n \n save.write(','.join(map(lambda x: str(x), labelArr)))\n save.write('\\n')\n \n save.close()\n print('save labels success')\n \n \nif __name__ == '__main__':\n read_image('t10k-images.idx3-ubyte')\n read_label('t10k-labels.idx1-ubyte', 'test_data/label.txt')\n","sub_path":"paras_cifar10_100_mnist.py","file_name":"paras_cifar10_100_mnist.py","file_ext":"py","file_size_in_byte":8690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"299935891","text":"\"\"\"A collection of Methods to support the Change History feature in DFCX.\"\"\"\n\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\nimport requests\n\nfrom dfcx_scrapi.core.scrapi_base import ScrapiBase\n\n# logging config\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\n\nclass ChangeHistory(ScrapiBase):\n \"\"\"Tools class that contains methods to support Change History feature.\"\"\"\n def __init__(\n self,\n creds_path: str = None,\n creds_dict: Dict = None,\n creds = None,\n scope = False,\n agent_id = None\n ):\n super().__init__(\n creds_path=creds_path,\n 
creds_dict=creds_dict,\n creds=creds,\n scope=scope\n )\n\n if agent_id:\n self.agent_id = agent_id\n\n def get_change_history(self, agent_id: str = None):\n \"\"\"Extract the Change History log for a single DFCX Agent.\n\n Args:\n agent_id, the formatted CX Agent ID\n\n Returns:\n logs, a List of logs from the Agent ID\n \"\"\"\n if not agent_id:\n agent_id = self.agent_id\n\n location = agent_id.split(\"/\")[3]\n if location != \"global\":\n base_url = \"https://{}-dialogflow.googleapis.com/v3alpha1\".format(\n location\n )\n else:\n base_url = \"https://dialogflow.googleapis.com/v3alpha1\"\n\n url = \"{0}/{1}/changelogs\".format(base_url, agent_id)\n\n headers = {\"Authorization\": \"Bearer {}\".format(self.token)}\n\n # Make REST call\n results = requests.get(url, headers=headers)\n results.raise_for_status()\n\n res = results.json()\n\n logs = []\n for log in res[\"changelogs\"]:\n logs.append(log)\n\n next_token = res.get(\"nextPageToken\",None)\n\n while next_token is not None:\n results = requests.get(\n url, headers=headers, params={\"page_token\": next_token}\n )\n res = results.json()\n for log in res[\"changelogs\"]:\n logs.append(log)\n\n if \"nextPageToken\" in res:\n next_token = res[\"nextPageToken\"]\n else:\n next_token = None\n print(\"All done!\")\n\n return logs\n\n def change_history_to_dataframe(self, agent_id):\n \"\"\"Format the output of get_change_history into a Pandas Dataframe.\n\n Args:\n agent_id, the formatted CX Agent ID\n\n Returns:\n final_dataframe, the final dataframe output of the formatted logs\n \"\"\"\n change_logs = self.get_change_history(agent_id)\n final_dataframe = pd.DataFrame.from_records(data=change_logs)\n\n final_dataframe[\"createTime\"] = pd.to_datetime(\n final_dataframe[\"createTime\"], infer_datetime_format=True\n ) # coerce datetime from CX\n final_dataframe[\"userType\"] = np.where(\n final_dataframe.userEmail.str.contains(\"@google.com\"),\n \"Internal\", \"External\") # determine int/ext user\n\n # TODO: functions to determine which Flow this resource belongs to\n\n return final_dataframe\n","sub_path":"src/dfcx_scrapi/tools/change_history.py","file_name":"change_history.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"446004093","text":"#! 
usr/bin/env python3\n# _*_coding = utf-8_*_\n\nimport plistlib, re, random, json\n\nwith open( '/Users/allenliu/Desktop/FAIL_C7CS10B0HGKY_2016-08-08 20-56-02_Uart.txt', 'r' ) as f:\n data = f.read()\n\n\ndef findResponse(n,data1):\n pattern = re.compile(r'%s' % n,re.DOTALL)\n response = pattern.split(data1)\n return response\n\n#Items\nRegForItems = re.compile( '=\\s.*\\(.*\\)\\s=' )\narrFind = re.findall( RegForItems, data )\nprint( arrFind )\narrFind1 = []\nfor x in arrFind:\n y = re.sub( '=\\sSTART TEST ', '', x )\n y = re.sub( '\\s=', '', y )\n print( y )\n arrFind1.append( y )\nprint( arrFind1 )\n\n#SendCommand\narrSendCommand = []\narrCommandTmp = re.findall( '\\(TX ==> \\[MOBILE\\]\\):.*\\n', data )\nprint( arrCommandTmp )\nfor x in arrCommandTmp:\n y = re.sub( '\\(TX ==> \\[MOBILE\\]\\):', '', x )\n y = re.sub( '\\n', '', y )\n if y != '':\n arrSendCommand.append( y )\nprint( arrSendCommand )\n\n#ReadCommand\nReadCommand = []\nfor i in range(0, len(arrSendCommand)):\n regexResponse = '\\(RX ==> \\[MOBILE\\]\\):%s.*?:-\\)' % arrSendCommand[i]\n commandResponse = findResponse(regexResponse,data)\n for x in range(0,len(commandResponse)-1):\n print('item %s:\\n %s\\n\\n\\n '%(x,commandResponse[x]))\n ReadCommand.append(y)\n\nprint(len(ReadCommand))\n\nOutPut = ''\nfor x in ReadCommand:\n print('index %s : %s' % (ReadCommand.index(x),x))\n # OutPut\n\n# with open('/Users/allenliu/Desktop/ReadCommand.txt','w') as f:\n# f.write()\n# print(commandResponse)\n\n\n\n\n\n\n\n#","sub_path":"PythonLiaoXueFeng-master/PraseLog.py","file_name":"PraseLog.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"402449559","text":"import numpy as np\nfrom PIL import Image\n\nim = Image.open('image.jpg')\nwidth, height = im.size \npixdata = im.load()\n\nfor i in range(width):\n\tfor j in range(height):\n\t\tred, green, blue = im.getpixel((i, j))\n\t\tvalue = (red + green + blue) / 3\n\t\tif value > 127: \n\t\t\t#red = pixdata[1]\n\t\t\t#green = pixdata[1]\n\t\t\t#blue = pixdata[1]\n\t\t\tpixdata[i,j]=(255,255,255)\n\t\telse:\n\t\t\t#red = pixdata[0]\n\t\t\t#green = pixdata[0]\n\t\t\t#blue = pixdata[0]\n\t\t\tpixdata[i,j] = (0,0,0)\n\n\n \t#value = (r) + (255) + (b)\n\t\t\t#pixdata[i,j] = (int(red),int(green),int(blue))\n\nim.save('inverso.png')\nim.show() \n","sub_path":"src/Inverso.py","file_name":"Inverso.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230163310","text":"\"\"\"\nCreated on 14 Mar 2018\n\n@author: mjiang\nming.jiang@epfl.ch\n\"\"\"\n\nimport numpy as np\nimport scipy.linalg as LA\n\n\ndef soft(alpha, th):\n \"\"\"\n soft-thresholding function\n\n :param alpha: wavelet coefficients\n :param th: threshold level\n :return: wavelet coefficients after soft threshold\n \"\"\"\n tmp = np.abs(alpha) - th\n tmp = (tmp + np.abs(tmp))/2.\n return np.sign(alpha) * tmp\n\n\ndef hard(alpha, th):\n \"\"\"\n hard-thresholding function\n\n :param alpha: wavelet coefficients\n :param th: threshold level\n :return: wavelet coefficients after hard threshold\n \"\"\"\n return alpha * (np.abs(alpha) > th)\n\n\ndef proj_sc(alpha, rad):\n \"\"\"\n scaling, projection on L2 norm\n\n :param alpha: coefficients to be processed\n :param rad: radius of the l2 ball\n :return: coefficients after projection\n \"\"\"\n return alpha * min(rad/LA.norm(alpha), 1)\n\n\ndef nuclear_norm(mat, th, mode='soft'):\n \"\"\"\n Proximity 
operator for nuclear norm.\n\n :param mat: input matrix of size [M, N]\n :param th: threshold level, vector of size [K], where K = min(M, N)\n :param mode: 'soft'-threshold or 'hard'-thresholding\n :return: output matrix, the main diagonal of the diagonal matrix (after proximal operation)\n \"\"\"\n U, s, Vh = LA.svd(mat, full_matrices=False)\n if mode == 'soft':\n s1 = soft(s, th)\n S1 = np.diag(s1)\n S1[S1 < 0] = 0\n return np.dot(U, np.dot(S1, Vh)), s1\n elif mode == 'hard':\n s1 = hard(s, th)\n S1 = np.diag(s1)\n S1[S1 < 0] = 0\n return np.dot(U, np.dot(S1, Vh)), s1\n\n\ndef l21_norm(alpha, th, mode='soft', axis=0):\n \"\"\"\n Proximity operator for joint sparsity. l2-norm on the given axis.\n\n :param alpha: coefficients to be processed, matrix of size [M, N]\n :param th: threshold level, the size of th should be consistent with the size of the other axis of alpha,\n e.g. axis = 0, size(th) = N; axis = 1, size(th) = M\n :param mode: 'soft'-threshold or 'hard'-thresholding\n :return: coefficients after joint sparsity, l21-norm (after proximal operation)\n \"\"\"\n import sys\n\n l2norm = LA.norm(alpha, axis=axis)\n alpha_l21 = np.copy(alpha)\n ind = (l2norm > sys.float_info.epsilon)\n if mode == 'soft':\n l2norm_th = soft(l2norm, th)\n elif mode == 'hard':\n l2norm_th = hard(l2norm, th)\n if axis == 0:\n # !Att: multiplication based on array broadcasting\n alpha_l21[:, ind] = np.multiply(l2norm_th[ind] / l2norm[ind], alpha[:, ind])\n elif axis == 1:\n # !Att: multiplication based on array broadcasting\n alpha_l21[ind] = np.multiply(l2norm_th[ind][:, np.newaxis] / l2norm[ind][:, np.newaxis], alpha[ind])\n return alpha_l21, l2norm_th.sum()\n\n\n# Elliptical projection #\ndef proj_ellipse(y, alpha, pU, z0, epsilon, max_iter, min_iter, tol):\n \"\"\"\n Elliptical projection solver\n\n :param y: data, complex vector [M]\n :param alpha: coefficients to be processed, complex vector [M]\n :param pU: preconditioning matrix, real vector [M]\n :param z0: initial guess\n :param epsilon: l2-ball epsilon\n :param max_iter: max iteration\n :param min_iter: min iteration\n :param tol: error tolerance, stopping criterion\n :return: coefficients after projection, real vector [M]\n \"\"\"\n mu = 1. 
/ (pU.max()**2)\n rel_err = 1.\n it = 0\n while (rel_err > tol and it < max_iter) or it < min_iter:\n grad = pU * (z0 - alpha)\n z = y + proj_sc(z0 - mu * grad - y, epsilon)\n rel_err = LA.norm(z - z0) / LA.norm(z)\n z0 = z\n it += 1\n return z\n","sub_path":"Psi/proxTools.py","file_name":"proxTools.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"440687539","text":"import requests\n\nres = requests.get('http://google.com')\n# print('response:', res.status_code)\n#\n# if res.status_code == requests.codes.ok:\n# print('ok')\n# else:\n# print('error code :')\n\n#에러 있으면 종료하고 밑에 코드 내보내지 않음 습관적으로 넣기\n# res.raise_for_status()\n# print('no print')\n\nwith open('text.html', 'w', encoding='utf8') as f:\n f.write(res.text)","sub_path":"requests_practice.py","file_name":"requests_practice.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"516095175","text":"import logging\n\nfrom collections import namedtuple\nfrom distutils.util import strtobool\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom pathlib import Path\nfrom psycopg2.extras import execute_values\nfrom psycopg2.sql import SQL\nfrom usaspending_api.common.csv_helpers import read_csv_file_as_list_of_dictionaries\nfrom usaspending_api.common.etl import ETLQueryFile, ETLTable, mixins\nfrom usaspending_api.common.helpers.sql_helpers import get_connection\nfrom usaspending_api.common.helpers.text_helpers import standardize_nullable_whitespace as prep\nfrom usaspending_api.common.helpers.timing_helpers import Timer\n\nlogger = logging.getLogger(\"console\")\n\nAgency = namedtuple(\n \"Agency\",\n [\n \"row_number\",\n \"cgac_agency_code\",\n \"agency_name\",\n \"agency_abbreviation\",\n \"frec\",\n \"frec_entity_description\",\n \"frec_abbreviation\",\n \"subtier_code\",\n \"subtier_name\",\n \"subtier_abbreviation\",\n \"toptier_flag\",\n \"is_frec\",\n \"user_selectable\",\n \"mission\",\n \"website\",\n \"congressional_justification\",\n \"icon_filename\",\n ],\n)\n\nMAX_CHANGES = 75\n\n\nclass Command(mixins.ETLMixin, BaseCommand):\n help = (\n \"Loads CGACs, FRECs, Subtier Agencies, Toptier Agencies, and Agencies. Load is all or nothing. \"\n \"If anything fails, nothing gets saved.\"\n )\n\n agency_file = None\n force = False\n\n etl_logger_function = logger.info\n etl_dml_sql_directory = Path(__file__).resolve().parent / \"load_agencies_sql\"\n\n def add_arguments(self, parser):\n\n parser.add_argument(\n \"agency_file\",\n metavar=\"AGENCY_FILE\",\n help=\"Path (for local files) or URI (for http(s) or S3 files) of the raw agency CSV file to be loaded.\",\n )\n\n parser.add_argument(\n \"--force\",\n action=\"store_true\",\n help=(\n \"Reloads agencies even if the max change threshold of {:,} is exceeded. This is a safety \"\n \"precaution to prevent accidentally updating every award, transaction, and subaward in the system as \"\n \"part of the nightly pipeline. 
Will also force foreign key table links to be examined even if it \"\n \"appears there were no agency changes.\".format(MAX_CHANGES)\n ),\n )\n\n def handle(self, *args, **options):\n\n self.agency_file = options[\"agency_file\"]\n self.force = options[\"force\"]\n\n logger.info(\"AGENCY FILE: {}\".format(self.agency_file))\n logger.info(\"FORCE SWITCH: {}\".format(self.force))\n logger.info(\"MAX CHANGE LIMIT: {}\".format(\"unlimited\" if self.force else \"{:,}\".format(MAX_CHANGES)))\n\n with Timer(\"Load agencies\"):\n try:\n with transaction.atomic():\n self._perform_load()\n t = Timer(\"Commit agency transaction\")\n t.log_starting_message()\n t.log_success_message()\n except Exception:\n logger.error(\"ALL CHANGES ROLLED BACK DUE TO EXCEPTION\")\n raise\n\n try:\n self._vacuum_tables()\n except Exception:\n logger.error(\"CHANGES WERE SUCCESSFULLY COMMITTED EVEN THOUGH VACUUMS FAILED\")\n raise\n\n def _read_raw_agencies_csv(self):\n agencies = read_csv_file_as_list_of_dictionaries(self.agency_file)\n if len(agencies) < 1:\n raise RuntimeError(\"Agency file '{}' appears to be empty\".format(self.agency_file))\n\n self.agencies = [\n Agency(\n row_number=row_number,\n cgac_agency_code=prep(agency[\"CGAC AGENCY CODE\"]),\n agency_name=prep(agency[\"AGENCY NAME\"]),\n agency_abbreviation=prep(agency[\"AGENCY ABBREVIATION\"]),\n frec=prep(agency[\"FREC\"]),\n frec_entity_description=prep(agency[\"FREC Entity Description\"]),\n frec_abbreviation=prep(agency[\"FREC ABBREVIATION\"]),\n subtier_code=prep(agency[\"SUBTIER CODE\"]),\n subtier_name=prep(agency[\"SUBTIER NAME\"]),\n subtier_abbreviation=prep(agency[\"SUBTIER ABBREVIATION\"]),\n toptier_flag=bool(strtobool(prep(agency[\"TOPTIER_FLAG\"]))),\n is_frec=bool(strtobool(prep(agency[\"IS_FREC\"]))),\n user_selectable=bool(strtobool(prep(agency[\"USER SELECTABLE ON USASPENDING.GOV\"]))),\n mission=prep(agency[\"MISSION\"]),\n website=prep(agency[\"WEBSITE\"]),\n congressional_justification=prep(agency[\"CONGRESSIONAL JUSTIFICATION\"]),\n icon_filename=prep(agency[\"ICON FILENAME\"]),\n )\n for row_number, agency in enumerate(agencies, start=1)\n ]\n\n return len(self.agencies)\n\n @staticmethod\n def _validate_raw_agency(agency):\n messages = []\n\n if agency.cgac_agency_code is not None and agency.agency_name is None:\n message = \"Row number {:,} has a CGAC AGENCY CODE but no AGENCY NAME\"\n messages.append(message.format(agency.row_number))\n if agency.frec is not None and agency.frec_entity_description is None:\n messages.append(\"Row number {:,} has a FREC but no FREC Entity Description\".format(agency.row_number))\n if agency.subtier_code is not None and agency.subtier_name is None:\n messages.append(\"Row number {:,} has a SUBTIER CODE but no SUBTIER NAME\".format(agency.row_number))\n if agency.is_frec is True and agency.frec is None:\n messages.append(\"Row number {:,} is marked as IS_FREC but has no FREC\".format(agency.row_number))\n if agency.is_frec is not True and agency.cgac_agency_code is None:\n messages.append(\n \"Row number {:,} is not marked as IS_FREC but has no CGAC AGENCY CODE\".format(agency.row_number)\n )\n if agency.cgac_agency_code and len(agency.cgac_agency_code) != 3:\n messages.append(\n \"Row number {:,} has CGAC AGENCY CODE that is not 3 characters long ({})\".format(\n agency.row_number, agency.cgac_agency_code\n )\n )\n if agency.frec and len(agency.frec) != 4:\n messages.append(\n \"Row number {:,} has FREC that is not 4 characters long ({})\".format(agency.row_number, agency.frec)\n )\n if 
agency.subtier_code and len(agency.subtier_code) != 4:\n messages.append(\n \"Row number {:,} has SUBTIER CODE that is not 4 characters long ({})\".format(\n agency.row_number, agency.subtier_code\n )\n )\n\n return messages\n\n def _validate_raw_agencies(self):\n\n messages = []\n for agency in self.agencies:\n messages += self._validate_raw_agency(agency)\n\n if messages:\n for message in messages:\n logger.error(message)\n raise RuntimeError(\n \"{:,} problem(s) have been found with the agency file. See log for details.\".format(len(messages))\n )\n\n def _import_raw_agencies(self):\n with get_connection(read_only=False).cursor() as cursor:\n execute_values(\n cursor.cursor,\n \"\"\"\n insert into temp_load_agencies_raw_agency (\n row_number,\n cgac_agency_code,\n agency_name,\n agency_abbreviation,\n frec,\n frec_entity_description,\n frec_abbreviation,\n subtier_code,\n subtier_name,\n subtier_abbreviation,\n toptier_flag,\n is_frec,\n user_selectable,\n mission,\n website,\n congressional_justification,\n icon_filename\n ) values %s\n \"\"\",\n self.agencies,\n page_size=len(self.agencies),\n )\n return cursor.rowcount\n\n def _perform_load(self):\n\n overrides = {\n \"insert_overrides\": {\"create_date\": SQL(\"now()\"), \"update_date\": SQL(\"now()\")},\n \"update_overrides\": {\"update_date\": SQL(\"now()\")},\n }\n\n agency_table = ETLTable(\"agency\", key_overrides=[\"toptier_agency_id\", \"subtier_agency_id\"], **overrides)\n cgac_table = ETLTable(\"cgac\", key_overrides=[\"cgac_code\"])\n frec_table = ETLTable(\"frec\", key_overrides=[\"frec_code\"])\n subtier_agency_table = ETLTable(\"subtier_agency\", key_overrides=[\"subtier_code\"], **overrides)\n toptier_agency_table = ETLTable(\"toptier_agency\", key_overrides=[\"toptier_code\"], **overrides)\n\n agency_query = ETLQueryFile(self.etl_dml_sql_directory / \"agency_query.sql\")\n cgac_query = ETLQueryFile(self.etl_dml_sql_directory / \"cgac_query.sql\")\n frec_query = ETLQueryFile(self.etl_dml_sql_directory / \"frec_query.sql\")\n subtier_agency_query = ETLQueryFile(self.etl_dml_sql_directory / \"subtier_agency_query.sql\")\n toptier_agency_query = ETLQueryFile(self.etl_dml_sql_directory / \"toptier_agency_query.sql\")\n\n self._execute_etl_dml_sql_directory_file(\"raw_agency_create_temp_table\", \"Create raw agency temp table\")\n self._execute_function_and_log(self._read_raw_agencies_csv, \"Read raw agencies csv\")\n self._execute_function(self._validate_raw_agencies, \"Validate raw agencies\")\n self._execute_function_and_log(self._import_raw_agencies, \"Import raw agencies\")\n\n self._delete_update_insert_rows(\"CGACs\", cgac_query, cgac_table)\n self._delete_update_insert_rows(\"FRECs\", frec_query, frec_table)\n\n rows_affected = 0\n rows_affected += self._delete_update_insert_rows(\"toptier agencies\", toptier_agency_query, toptier_agency_table)\n rows_affected += self._delete_update_insert_rows(\"subtier agencies\", subtier_agency_query, subtier_agency_table)\n rows_affected += self._delete_update_insert_rows(\"agencies\", agency_query, agency_table)\n\n if rows_affected > MAX_CHANGES and not self.force:\n raise RuntimeError(\n \"Exceeded maximum number of allowed changes ({:,}). 
Use --force switch if this was \"\n \"intentional.\".format(MAX_CHANGES)\n )\n\n elif rows_affected > 0 or self.force:\n self._execute_etl_dml_sql_directory_file(\n \"treasury_appropriation_account_update\", \"Update treasury appropriation accounts\"\n )\n self._execute_etl_dml_sql_directory_file(\"transaction_normalized_update\", \"Update transactions\")\n self._execute_etl_dml_sql_directory_file(\"award_update\", \"Update awards\")\n self._execute_etl_dml_sql_directory_file(\"subaward_update\", \"Update subawards\")\n\n else:\n logger.info(\n \"Skipping treasury_appropriation_account, transaction_normalized, \"\n \"awards, and subaward updates since there were no agency changes.\"\n )\n\n def _vacuum_tables(self):\n self._execute_dml_sql(\"vacuum (full, analyze) agency\", \"Vacuum agency table\")\n self._execute_dml_sql(\"vacuum (full, analyze) cgac\", \"Vacuum cgac table\")\n self._execute_dml_sql(\"vacuum (full, analyze) frec\", \"Vacuum frec table\")\n self._execute_dml_sql(\"vacuum (full, analyze) subtier_agency\", \"Vacuum subtier_agency table\")\n self._execute_dml_sql(\"vacuum (full, analyze) toptier_agency\", \"Vacuum toptier_agency table\")\n","sub_path":"usaspending_api/references/management/commands/load_agencies.py","file_name":"load_agencies.py","file_ext":"py","file_size_in_byte":11783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"21379831","text":"\n\ninput = 'input.txt'\noutput = 'output.txt'\n\ndef readinput():\n # Get the words/numbers of the file\n words=[]\n with open(input) as f:\n lines = f.readlines()\n # for line in lines:\n # words.append(line.split()) #words.append(line) will get the line, but this gets the words\n\n return lines\n\n\n\n\ndef writeoutput(line):\n # Output into the output file\n with open(output,'w') as f:\n f.write(str(line)+'\\n')\n\n","sub_path":"Python Assignment 1 (File responses)/Python Assignment 1 (File responses)/Question1 (File responses)/ovcc_grader.py","file_name":"ovcc_grader.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"36350564","text":"\"\"\"\n207. 
Course Schedule\nYou must take numCourses courses this semester, labelled 0 to numCourses - 1.\n\nSome courses have prerequisites, given as an array prerequisites where prerequisites[i] = [ai, bi]\nmeans that to take course ai you must first take course bi.\n\nFor example, the pair [0, 1] means: to take course 0 you must first finish course 1.\nDecide whether it is possible to finish all courses: return true if so, false otherwise.\n\nExample 1:\nInput: numCourses = 2, prerequisites = [[1,0]]\nOutput: true\nExplanation: There are 2 courses in total. To take course 1 you must first finish course 0, which is possible.\n\nExample 2:\nInput: numCourses = 2, prerequisites = [[1,0],[0,1]]\nOutput: false\nExplanation: There are 2 courses in total. Taking course 1 requires finishing course 0 first, and taking course 0 requires finishing course 1 first, which is impossible.\n\"\"\"\nfrom collections import defaultdict\nfrom typing import List, Dict\n\n\nclass Solution:\n def canFinish(self, numCourses: int, prerequisites: List[List[int]]) -> bool:\n # adjacency (out-edge) lists\n edges: Dict[int, List[int]] = defaultdict(list)\n # in-degree bookkeeping, one entry per course\n in_degree: Dict[int, int] = {x: 0 for x in range(numCourses)}\n\n # build the out-edge lists and in-degree counts for every course\n for pre_pair in prerequisites:\n pre_class = pre_pair[1]\n next_class = pre_pair[0]\n edges[pre_class].append(next_class)\n in_degree[next_class] += 1\n\n # seed the queue with courses whose in-degree is 0\n que = []\n for course, ind_num in in_degree.items():\n if ind_num == 0:\n que.append(course)\n\n if not que:\n return False\n\n while que:\n curr_class = que.pop(0)\n edge = edges[curr_class]\n for c in edge:\n in_degree[c] -= 1\n if in_degree[c] == 0:\n que.append(c)\n\n is_can_learn_all = True\n for course, ind_num in in_degree.items():\n if ind_num != 0:\n is_can_learn_all = False\n break\n\n return is_can_learn_all\n\n","sub_path":"content/data_structure_and_algorithm/content/week3/python_prac/class_table.py","file_name":"class_table.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"475772143","text":"# -*- coding: utf-8 -*-\n# @Author: Nessaj\n# @Date: 2019-12-16 10:38:27\n# @Last Modified by: Nessaj\n# @Last Modified time: 2019-12-17 15:59:04\n\nimport json\nimport time\nimport random\n\nallcamera=dict()\nallcamera[\"camera0\"]=0\nallcamera[\"camera1\"]=0\nallcamera[\"camera2\"]=0\nallcamera[\"camera3\"]=0\n\n\ndef get_json(timestamp,location,cameraID,warningType,info=None):\n \"\"\"\n Build a JSON payload.\n Args:\n timestamp: timestamp\n location: camera location\n cameraID: unique camera identifier\n warningType: {0: no anomaly\n 1: blacklisted person\n 2: unknown person\n 3: unknown vehicle\n 4: collision\n 5: fire (infrared)}\n info: extra note, None by default\n Returns:\n JSON-formatted data, for example:\n {\"time\": 1576324177, \"location\": 1, \"cameraID\": 3, \"warningType\": 3, \"info\": \"fakenews\"}\n \"\"\"\n data=dict()\n\n data['time']=timestamp\n data['location']=location\n data['cameraID']=cameraID\n warninglist=[\"No-Warning\",\"BlackList\",\"UnknownPerson\",\"UnknownCar\",\"CarCrash\",\"Fire\"]\n warning={'warningType':warningType,'warningEvent':warninglist[warningType]}\n data['warning']=warning\n data['info']=info\n\n myjson=json.dumps(data)\n return myjson\n\n\ndef randomdata(cid=0):\n\n\n # if(random.randint(0,2)==0):\n # timestamp=time.time()\n # location=cid\n # cameraID=cid\n # allcamera[\"camera\"+str(cameraID)]=1\n # warningType=random.randint(0,4)\n # infolist=['fake-data','pseudo-data','mock-data']\n # info=infolist[random.randint(0,2)]\n # randomjson=get_json(timestamp,location,cameraID,warningType,info)\n\n # return randomjson\n # else:\n\n # return None\n\n timestamp=time.time()\n location=cid\n cameraID=cid\n if(random.randint(0,2)==0):\n\n allcamera[\"camera\"+str(cameraID)]=1\n warningType=random.randint(1,5)\n infolist=['fake-data','pseudo-data','mock-data']\n info=infolist[random.randint(0,2)]\n\n else:\n allcamera[\"camera\"+str(cameraID)]=0\n warningType=0\n info=\"No-Warning\"\n randomjson=get_json(timestamp,location,cameraID,warningType,info)\n return randomjson\n\n\nrd0=randomdata(0)\nrd1=randomdata(1)\nrd2=randomdata(2)\nrd3=randomdata(3)\n","sub_path":"mysite/fakedata/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
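# Example of the payload get_json above produces (fixed timestamp for
# reproducibility; the argument values are illustrative):
sample = get_json(1576324177, location=1, cameraID=1, warningType=3, info='mock-data')
print(sample)
# {"time": 1576324177, "location": 1, "cameraID": 1,
#  "warning": {"warningType": 3, "warningEvent": "UnknownCar"}, "info": "mock-data"}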
{"seq_id":"561035112","text":"# -*- coding: utf-8 -*-\n'''\nModule for interfacing to Junos devices.\n'''\nfrom __future__ import absolute_import\n\n# Import python libraries\nimport logging\nimport json\nfrom lxml import etree\n# Juniper interface libraries\n# https://github.com/Juniper/py-junos-eznc\n\n\ntry:\n # pylint: disable=W0611\n from jnpr.junos import Device\n from jnpr.junos.utils.sw import SW\n from jnpr.junos.utils.scp import SCP\n import jnpr.junos.utils\n import jnpr.junos.cfg\n # pylint: enable=W0611\n HAS_JUNOS = True\nexcept ImportError:\n HAS_JUNOS = False\n\n\n# Set up logging\nlog = logging.getLogger(__name__)\n\n\n# Define the module's virtual name\n__virtualname__ = 'junos'\n\n__proxyenabled__ = ['junos']\n\n\ndef __virtual__():\n '''\n We need the Junos adapter libraries for this\n module to work. We also need a proxymodule entry in __opts__\n in the opts dictionary\n '''\n if HAS_JUNOS and 'proxy' in __opts__:\n return __virtualname__\n else:\n return (False, 'The junos module could not be \\\n loaded: junos-eznc or proxy could not be loaded.')\n\n\ndef facts_refresh():\n '''\n Reload the facts dictionary from the device. Usually only needed\n if the device configuration is changed by some other actor.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.facts_refresh\n\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['out'] = True\n try:\n ret['message'] = conn.facts_refresh()\n\n except Exception as exception:\n ret['message'] = 'Execution failed due to \"{0}\"'.format(exception)\n ret['out'] = False\n\n return ret\n\n\ndef facts():\n '''\n Displays the facts gathered during the connection.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.facts\n\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['message'] = json.dumps(conn.facts)\n ret['out'] = True\n return ret\n\n\ndef call_rpc(cmd=None, *args, **kwargs):\n '''\n This function executes the rpc provided as arguments on the junos device.\n The returned data can be stored in a file whose destination can be\n specified with 'dest' keyword in the arguments.\n\n Usage:\n\n ..
code-block:: bash\n\n salt 'device' junos.call_rpc 'get_config' '' terse=True\n\n salt 'device' junos.call_rpc 'get-chassis-inventory' dest=/home/user/rpc_information.txt\n\n\n Options:\n * cmd: the rpc to be executed\n * args: other arguments as taken by rpc call of PyEZ\n * kwargs: keyworded arguments taken by rpc call of PyEZ\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['out'] = True\n\n op = dict()\n if '__pub_arg' in kwargs and isinstance(kwargs['__pub_arg'][-1], dict):\n op.update(kwargs['__pub_arg'][-1])\n else:\n op.update(kwargs)\n\n for k, v in op.iteritems():\n op[k] = str(v)\n op['format'] = 'json'\n\n try:\n if cmd in ['get-config', 'get_config']:\n filter_reply = None\n if len(args) > 0:\n filter_reply = etree.XML(args[0])\n ret['message'] = getattr(conn.rpc, cmd.replace('-', '_'))(filter_reply, options=op)\n else:\n ret['message'] = getattr(conn.rpc, cmd.replace('-', '_'))(op)\n\n except Exception as exception:\n\n ret['message'] = 'Execution failed due to \"{0}\"'.format(exception)\n ret['out'] = False\n\n if 'dest' in op:\n f = open(op['dest'], 'w')\n f.write(ret['message'])\n f.close()\n\n return ret\n\n\ndef set_hostname(hostname=None, commit_change=True):\n '''\n To set the name of the device.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.set_hostname hostname=salt-device\n\n\n Options:\n * hostname: The name to be set.\n * commit_change: Whether to commit the changes.(default=True)\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n if hostname is None:\n ret['out'] = False\n return ret\n\n # Added to recent versions of JunOs\n # Use text format instead\n set_string = 'set system host-name {0}'.format(hostname)\n conn.cu.load(set_string, format='set')\n if commit_change:\n return commit()\n else:\n ret['out'] = True\n ret['msg'] = 'set system host-name {0} is queued'.format(hostname)\n\n return ret\n\n\ndef commit():\n '''\n To commit the changes loaded in the candidate configuration.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.commit\n\n '''\n\n conn = __proxy__['junos.conn']()\n ret = {}\n commit_ok = conn.cu.commit_check()\n if commit_ok:\n try:\n conn.cu.commit(confirm=True)\n ret['out'] = True\n ret['message'] = 'Commit Successful.'\n except Exception as exception:\n ret['out'] = False\n ret['message'] = 'Pre-commit check succeeded but actual commit failed with \"{0}\"'.format(\n exception)\n else:\n ret['out'] = False\n ret['message'] = 'Pre-commit check failed.'\n\n return ret\n\n\ndef rollback():\n '''\n To rollback the last committed configuration changes\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.rollback\n\n '''\n ret = dict()\n conn = __proxy__['junos.conn']()\n\n ret['out'] = conn.cu.rollback(0)\n\n if ret['out']:\n ret['message'] = 'Rollback successful'\n else:\n ret['message'] = 'Rollback failed'\n\n return ret\n\n\ndef diff():\n '''\n Gives the difference between the candidate and the current configuration.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.diff\n\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['out'] = True\n ret['message'] = conn.cu.diff()\n\n return ret\n\n\ndef ping():\n '''\n To check the connection with the device\n\n Usage:\n\n .. 
code-block:: bash\n\n salt 'device_name' junos.diff\n\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['out'] = True\n ret['message'] = conn.cu.diff()\n\n return ret\n\n\ndef ping():\n '''\n To check the connection with the device\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.ping\n\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['message'] = conn.probe()\n if ret['message']:\n ret['out'] = True\n else:\n ret['out'] = False\n return ret\n\n\ndef cli(command=None):\n '''\n Executes the CLI commands and returns the text output.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.cli 'show version'\n\n\n Options:\n * command: The command that needs to be executed on Junos CLI.\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['message'] = conn.cli(command)\n ret['out'] = True\n return ret\n\n\ndef shutdown(time=0):\n '''\n Shuts down the device after the given time.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.shutdown 10\n\n\n Options:\n * time: Time in minutes after which the device should shutdown (default=0)\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n sw = SW(conn)\n try:\n sw.poweroff(in_min=time)\n ret['message'] = 'Successfully powered off.'\n ret['out'] = True\n except Exception as exception:\n ret['message'] = 'Could not power off due to \"{0}\"'.format(exception)\n ret['out'] = False\n\n return ret\n\n\ndef install_config(path=None, **kwargs):\n '''\n Installs the given configuration file into the candidate configuration.\n Commits the changes if the commit check passes; rolls back otherwise.\n\n Usage:\n\n ..
code-block:: bash\n\n salt 'device_name' junos.install_os '/home/user/junos_image.tgz' reboot=True\n\n\n Options:\n * path: Path where the image file is present.\n * kwargs: keyword arguments to be given, such as timeout, reboot etc.\n\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['out'] = True\n\n if 'timeout' in kwargs:\n conn.timeout = kwargs['timeout']\n\n try:\n install = conn.sw.install(path, progress=True)\n ret['message'] = 'Installed the os.'\n except Exception as exception:\n ret['message'] = 'Installation failed due to : \"{0}\"'.format(exception)\n ret['out'] = False\n\n if ret['out'] and 'reboot' in kwargs and kwargs['reboot'] is True:\n rbt = conn.sw.reboot()\n ret['message'] = 'Successfully installed and rebooted!'\n\n return ret\n\n\ndef file_copy(src=None, dest=None):\n '''\n Copies the file from the local device to the junos device.\n\n Usage:\n\n .. code-block:: bash\n\n salt 'device_name' junos.file_copy /home/m2/info.txt info_copy.txt\n\n\n Options:\n * src: The source path where the file is kept.\n * dest: The destination path where the file will be copied.\n '''\n conn = __proxy__['junos.conn']()\n ret = dict()\n ret['out'] = True\n try:\n with SCP(conn, progress=True) as scp:\n scp.put(src, dest)\n ret['message'] = 'Successfully copied file from {0} to {1}'.format(\n src, dest)\n\n except Exception as exception:\n ret['message'] = 'Could not copy file : \"{0}\"'.format(exception)\n ret['out'] = False\n\n return ret\n","sub_path":"salt/modules/junos.py","file_name":"junos.py","file_ext":"py","file_size_in_byte":10585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"315018325","text":"from bs4 import BeautifulSoup\nimport json\n\nwith open('wiki.html','rb') as htmlFile:\n html = htmlFile.read()\n\nsoup = BeautifulSoup(html, 'html.parser')\nbody = soup.find('tbody')\n\nchars = {}\n\nfor row in body.find_all('tr'):\n head = row.find('th')\n header = head.get_text()\n header = header.split('[')[0]\n header = header.strip()\n try:\n if(row.find_all('i')[0].find('a') is not None):\n series = row.find_all('i')[0].find('a').get_text()\n else:\n series = row.find('i').get_text()\n except (IndexError, AttributeError):\n series = row.find('a').get_text()\n \n\n if(header.strip() != 'Fighter' and header.strip() != 'Total'):\n chars[header] = series\n print(header + \" : \" + series)\n print(\"\\n\")\n\nwith open('series.json','w') as series:\n json.dump(chars,series)\n# header = body.find_all('th')\n# print(header)\n# row = body.find_all('a')\n# print(row)\n\n# The first tr contains the field names.\n# headings = [th.get_text().strip() for th in table.find(\"thead\").find_all(\"th\")]\n\n# # print(headings)\n\n# table = soup.find('tbody')\n\n# datasets = []\n# for row in table.find_all(\"tr\")[0:]:\n# dataset = dict(zip(headings, (td.get_text() for td in row.find_all(\"td\"))))\n# datasets.append(dataset)\n\n# # print(datasets)\n\n# with open('data.json', 'w') as f:\n# json.dump(datasets, f)","sub_path":"webscrape_wiki.py","file_name":"webscrape_wiki.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"379418569","text":"from klampt.model import trajectory\nfrom shapely.geometry.polygon import Polygon\nimport random\nfrom klampt import vis\nfrom .geometry_object_superclass import _3dGeometrySuperclass\nfrom src.utils.math_utils import MathUtils\nfrom shapely.geometry import LineString\n\n\nclass SupportTriangle(_3dGeometrySuperclass):\n\n # diag_point_idxs: 
[False, True, True]\n\n def __init__(self, P, height_map, diag_point_idxs:list, name=None, save_incenter=True):\n\n self.unchanged_points = P\n self.diag_point_idxs = diag_point_idxs\n self.points = []\n\n for p in P:\n self.points.append(self.make_2d(p))\n\n self.shapely_poly = Polygon(self.points)\n _3dGeometrySuperclass.__init__(self, height_map, name, self.shapely_poly)\n\n self.incenterx = None\n self.incentery = None\n self.incenterr = None\n\n self.P_diag1, self.P_diag2 = None, None\n self.update_diag_points()\n\n if save_incenter:\n self.save_incenter_xyr()\n\n def update_diag_points(self):\n\n if self.diag_point_idxs[0] and self.diag_point_idxs[1]:\n self.P_diag1, self.P_diag2 = self.points[0], self.points[1]\n\n elif self.diag_point_idxs[0] and self.diag_point_idxs[2]:\n self.P_diag1, self.P_diag2 = self.points[0], self.points[2]\n\n elif self.diag_point_idxs[1] and self.diag_point_idxs[2]:\n self.P_diag1, self.P_diag2 = self.points[1], self.points[2]\n else:\n print(f\"ERROR: diag_point_idxs={self.diag_point_idxs} not valid\")\n\n def get_diag_linestring(self) -> LineString:\n return LineString([self.P_diag1, self.P_diag2])\n\n def save_incenter_xyr(self):\n self.incenterx, self.incentery, self.incenterr = MathUtils.incenter_circle_xy_R(\n self.points[0],self.points[1],self.points[2])\n\n def enforce_safety_margin(self, scaling_factor):\n\n points = list(self.shapely_poly.exterior.coords)\n P1 = points[0]\n P2 = points[1]\n P3 = points[2] # 0, 10\n V1 = MathUtils.get_vector_between_points(P1, P2)\n V1_neg = MathUtils.flip_vector(V1)\n V2 = MathUtils.get_vector_between_points(P2, P3)\n V2_neg = MathUtils.flip_vector(V2)\n V3 = MathUtils.get_vector_between_points(P3, P1)\n V3_neg = MathUtils.flip_vector(V3)\n v1_prime = MathUtils.scalar_multiply_vector( MathUtils.vector_adder(V1_neg, V3), scaling_factor)\n v2_prime = MathUtils.scalar_multiply_vector( MathUtils.vector_adder(V1, V2_neg), scaling_factor)\n v3_prime = MathUtils.scalar_multiply_vector( MathUtils.vector_adder(V2, V3_neg), scaling_factor)\n P_ret1 = MathUtils.add_scaled_vector_to_pt(P1, v1_prime, 1)\n P_ret2 = MathUtils.add_scaled_vector_to_pt(P2, v2_prime, 1)\n P_ret3 = MathUtils.add_scaled_vector_to_pt(P3, v3_prime, 1)\n self.points = [P_ret1, P_ret2, P_ret3]\n self.shapely_poly = Polygon(self.points)\n self.shapely_poly.exterior.coords = self.points\n self.update_diag_points()\n self.save_incenter_xyr()\n\n def visualize(self, style=\"none\", hide_label=True):\n\n if style == \"none\":\n milestones = []\n for p in list(self.shapely_poly.exterior.coords):\n z = self.height_map.height_at_xy(p[0],p[1]) + .01\n milestones.append([p[0],p[1],z])\n path = trajectory.Trajectory(milestones=milestones)\n if not self.name:\n self.name = \"Support Triangle \" + str(random.randint(1, 1000))\n vis.add(self.name, path)\n if hide_label:\n vis.hideLabel(self.name)\n\n elif style == \"dashed\":\n dashes = 10.0\n xyz_points = list(self.shapely_poly.exterior.coords)\n for i in range(len(xyz_points)):\n x, y = xyz_points[i][0], xyz_points[i][1]\n xyz_points[i] = [x, y, self.height_map.height_at_xy(x,y)+.01]\n\n self.visualize_dashed_line(xyz_points[0], xyz_points[1], \"1-2\", dashes)\n self.visualize_dashed_line(xyz_points[0], xyz_points[2], \"1-3\", dashes)\n self.visualize_dashed_line(xyz_points[1], xyz_points[2], \"2-3\", dashes)\n\n\n def visualize_dashed_line(self, xyz1, xyz2, name, dashes):\n\n delta_x = xyz2[0] - xyz1[0]\n delta_y = xyz2[1] - xyz1[1]\n delta_z = xyz2[2] - xyz1[2]\n\n dx = delta_x / (2.0*float(dashes))\n dy = 
delta_y / (2.0*float(dashes))\n dz = delta_z / (2.0*float(dashes))\n\n for i in range(int(dashes)):\n h=i*2\n p1 = [xyz1[0] + h*dx, xyz1[1] + h*dy, xyz1[2] + h*dz]\n p2 = [xyz1[0] + (h+1)*dx, xyz1[1] + (h+1)*dy, xyz1[2] + (h+1)*dz]\n traj = trajectory.Trajectory(milestones=[p1, p2])\n vis.add((name + str(h)), traj)","sub_path":"src/utils/geometry_objects/_2d_triangle_geometry.py","file_name":"_2d_triangle_geometry.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"7356777","text":"from rich.console import Console\r\nfrom rich import print\r\nimport subprocess as sp\r\nfrom os import path\r\nimport toml\r\nimport glob\r\n\r\nconsole = Console()\r\n\r\ndef load(toml_config):\r\n \"\"\"Load toml config file\r\n \"\"\"\r\n if toml_config.split(\".\")[-1] == \"toml\":\r\n return toml.load(toml_config)\r\n return toml.load(toml_config+\".toml\")\r\n\r\n\r\ndef check_nmap_path(configDict):\r\n \"\"\"check nmap script path and if not, create one.\r\n \"\"\"\r\n install_path = configDict[\"install_path\"]\r\n if path.exists(install_path):\r\n return True\r\n\r\n return sp.Popen([\"mkdir\",\"-p\",install_path])\r\n\r\n\r\n\r\ndef list_scripts(configDict):\r\n scripts = configDict[\"nse-scripts\"][\"scripts\"]\r\n\r\n for s in scripts :\r\n git_data = s.split(\"/\")\r\n nse_name = git_data[-1]\r\n git_name = git_data[-2]\r\n console.print(\"[bold green][+][/bold green] [bold yellow]%s[/bold yellow] by [bold red]@%s[/bold red] at %s\" % (nse_name,git_name,s))\r\n\r\n\r\n\r\ndef install_script(installPath,scriptSource):\r\n \"\"\"Clone and install NSE script from CLI\r\n \"\"\"\r\n git_data = scriptSource.split(\"/\")\r\n nse_name = git_data[-1]\r\n git_name = git_data[-2]\r\n full_path = installPath + nse_name\r\n\r\n if path.exists(full_path):\r\n console.print(\"[+] Consider updating [bold yellow]%s[/bold yellow] by [bold green]@%s[/bold green]\" %(nse_name,git_name), style=\"bold red\")\r\n return \r\n\r\n print(\"[bold green][+][/bold green] Installing [bold yellow]%s[/bold yellow] by [bold red]@%s[/bold red]\" % (nse_name,git_name))\r\n capt = sp.run([\"git\",\"clone\",\"--depth=1\",scriptSource,full_path],stdout=sp.DEVNULL,stderr=sp.DEVNULL)\r\n \r\n # unpack nse script\r\n nse_script = glob.glob(full_path+ \"/*.nse\") + glob.glob(full_path+ \"/*.txt\") + glob.glob(full_path+ \"/*.json\") + glob.glob(full_path+ \"/*.csv\")\r\n for file in nse_script:\r\n sp.run([\"cp\",file,installPath],stdout=sp.DEVNULL)\r\n print(\"[+] Unpacking\",file)\r\n console.print(\"[bold green][+][/bold green] [bold yellow]%s[/bold yellow] successfully Installed !\\n\" % (nse_name),style=\"bold green\")\r\n return\r\n\r\n\r\n\r\ndef install_scripts_all(installPath,configDict):\r\n \"\"\"Install all NSE script from toml config file\r\n \"\"\"\r\n nse_script_links = configDict[\"nse-scripts\"][\"scripts\"]\r\n\r\n for links in nse_script_links:\r\n install_script(installPath,links)\r\n\r\n\r\n\r\ndef clean_install(installPath,configDict):\r\n \"\"\"Remove installed nse scripts\r\n \"\"\"\r\n\r\n scripts = configDict[\"nse-scripts\"][\"scripts\"]\r\n\r\n for s in scripts : \r\n nse_name = s.split(\"/\")[-1]\r\n full_path = installPath + nse_name\r\n\r\n print(\"[bold red][-] Deleting[/bold red] [bold yellow]%s[/bold yellow]\" %(nse_name))\r\n sp.Popen(['rm','-rf',full_path])\r\n\r\n\r\n nse_script = glob.glob(full_path+ \"/*.nse\") + glob.glob(full_path+ \"/*.txt\") + glob.glob(full_path+ \"/*.json\") + glob.glob(full_path+ 
\"/*.csv\")\r\n nse_script_path = [script.split(\"/\")[-1] for script in nse_script]\r\n\r\n for p in nse_script_path :\r\n sp.Popen(['rm','-f',installPath + p])\r\n print(\"[bold green][+] Everything Cleaned ![/bold green]\")\r\n\r\n\r\n\r\ndef update_script(installPath,scriptSource):\r\n \"\"\"Update NSE script (git pull) from toml config file\r\n \"\"\"\r\n\r\n git_data = scriptSource.split(\"/\")\r\n nse_name = git_data[-1]\r\n git_name = git_data[-2]\r\n full_path = installPath + nse_name\r\n\r\n\r\n # check if installed \r\n if path.exists(full_path) == False:\r\n console.print(\"[+] [bold yellow]%s[/bold yellow] not installed !\" %(nse_name), style=\"bold red\")\r\n console.print(\"[+] Installing [bold yellow]%s[/bold yellow] for you !\" %(nse_name), style=\"bold green\")\r\n install_script(installPath, scriptSource)\r\n return\r\n \r\n # update script with git pull\r\n print(\"[bold green][+][/bold green] Updating [bold yellow]%s[/bold yellow] by [bold red]@%s[/bold red]\" % (nse_name,git_name))\r\n update_message = sp.run([\"git\",\"-C\",full_path,\"pull\"], stdout=sp.PIPE,universal_newlines=True)\r\n\r\n # the repo is already up to date so do nothing\r\n if len(str(update_message.stdout)) == 20:\r\n print(\"[bold green][-][/bold green] [bold yellow]%s[/bold yellow] : %s\" % (nse_name,update_message.stdout))\r\n return\r\n # repo is pulled - or error\r\n # TODO handle error\r\n else: \r\n print(\"[bold green][-][/bold green] [bold yellow]%s[/bold yellow] : %s\" % (nse_name,update_message.stdout))\r\n nse_script = glob.glob(full_path+ \"/*.nse\") + glob.glob(full_path+ \"/*.txt\") + glob.glob(full_path+ \"/*.json\") + glob.glob(full_path+ \"/*.csv\")\r\n for file in nse_script:\r\n sp.run([\"cp\",file,installPath],stdout=sp.DEVNULL)\r\n print(\"[bold green][+][/bold green]Unpacking\",file)\r\n return\r\n\r\n\r\n\r\ndef update_scripts_all(installPath,configDict):\r\n \"\"\"Update all script (pull) from toml config file\r\n \"\"\"\r\n nse_script_links = configDict[\"nse-scripts\"][\"scripts\"]\r\n\r\n for links in nse_script_links:\r\n update_script(installPath,links)\r\n\r\n\r\ndef add_script():\r\n \"\"\"add NSE script from CLI\r\n \"\"\"\r\n pass\r\n","sub_path":"nse_install/nse_install.py","file_name":"nse_install.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"453114833","text":"import json\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\nBASE_URL = \"https://wahl.tagesschau.de/wahlen/\"\nDATE_FORMAT = \"%d.%m.%Y\"\n\n\ndef read_data():\n with open(\"data.html\") as f:\n data = f.read()\n\n return BeautifulSoup(data, 'html.parser')\n\n\ndef save_result(data):\n rv = {\n \"meta\": {\n \"source\": \"https://github.com/ciex/wahlergebnisse\",\n },\n \"data\": data\n }\n with open(\"wahlergebnisse.json\", \"w\") as f:\n json.dump(data, f, indent=2)\n\n\ndef extract(entry):\n rv = {}\n\n title_elem = entry.find(class_=\"spantitel\")\n\n # .spantitel a.href\n href = title_elem.a['href']\n start_pos = href.rfind('/', 0, -len('/index.shtml')) + 1\n rv['url'] = BASE_URL + href[start_pos:]\n\n # .spantitel a span.content\n title = title_elem.a.span.text\n rv['title'] = title\n\n kind_endpos = title.find(' ')\n rv['kind'] = title[:kind_endpos]\n\n year_startpos = title.rfind(' ')\n if year_startpos == kind_endpos:\n rv['territory'] = \"Deutschland\" if rv['kind'] == \"Bundestagswahl\" \\\n else \"Europa\"\n else:\n rv['territory'] = title[kind_endpos + 1:year_startpos]\n\n # 
.spandatum.contents\n date_text = entry.find(class_=\"spandatum\").text[len('am\\xa0'):]\n rv['date'] = datetime.strptime(date_text, DATE_FORMAT).isoformat()\n\n # ul.subdirectlinks li\n parties = entry.find_all('li')\n rv['results'] = dict()\n for party in parties:\n party_infos = party.find_all('span')\n name = party_infos[0].text\n votes = int(party_infos[1].text[1:-2].replace('.', ''))\n pct = float(party_infos[2].text[:-2].replace(',', '.'))\n\n rv['results'][name.strip()] = {\n 'votes': votes,\n 'pct': pct\n }\n\n return rv\n\n\ndef main():\n data = read_data()\n results = data.find_all('ul', class_='directLinks')[0]\n print(\"{} Wahlen gefunden\".format(len(list(results))))\n\n rv = []\n for entry in results:\n rv.append(extract(entry))\n\n save_result(sorted(rv, key=lambda x: x['title']))\n\nif __name__ == '__main__':\n main()\n","sub_path":"convert_to_json.py","file_name":"convert_to_json.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496950612","text":"import nltk\nimport pprint\nimport parse_data as p\nimport prior_prob as prior\nimport features as f\nimport math\n\n# Get the number of appearances of each sentiment, as well as the total number of sentences\ndef get_sentiment_counts(reviews):\n\tsent_counts = dict()\n\tsent_counts['total'] = 0\n\n\tfor i in reviews:\n\t\tfor line in reviews[i]['reviews']:\n\t\t\tif line[0] in sent_counts:\n\t\t\t\tsent_counts[line[0]] += 1\n\t\t\telse:\n\t\t\t\tsent_counts[line[0]] = 1\n\t\t\tsent_counts['total'] += 1\n\n\treturn sent_counts\n\n# Generate the feature vectors (which include frequency data so we can use our naive bayes classifier) for the training data\ndef gen_sentiment_vectors(reviews, word_lists, popular_words):\n\tsentiment_vectors = dict()\n\n\tfor i in reviews:\n\t\tdoc_tag = reviews[i]['title']\n\t\tprev_sentiment = None\n\n\t\tfor line in reviews[i]['reviews']:\n\t\t\tif line == (\"
\", ) or line == (\"
\", ):\n\t\t\t\tcontinue\n\n\t\t\tsentiment = line[0]\n\t\t\tsentence = line[1]\n\n\t\t\tfeatures = f.extract_features(sentence, word_lists, popular_words, doc_tag, prev_sentiment)\n\n\t\t\tif sentiment in sentiment_vectors:\n\t\t\t\tsentiment_vectors[sentiment] = f.merge_features(sentiment_vectors[sentiment], features)\n\t\t\telse:\n\t\t\t\tsentiment_vectors[sentiment] = features\n\n\t\t\tprev_sentiment = sentiment\n\n#\t\tif i % 20 == 0:\n#\t\t\tprint \"Done with \" + str(i)\n\n\tfor sentiment in sentiment_vectors:\n\t\tsentiment_vectors[sentiment] = f.smooth_features(sentiment_vectors[sentiment])\n\n\treturn sentiment_vectors\n\n# Calculate the probability of sentiment \"sentiment\" being correct for \"sentence\"\ndef calc_sentiment_prob(vectors, sent_counts, sentence, sentiment, word_lists, popular_words, doc_tag = None, prev_sentiment = None):\n\tfeatures = f.extract_features(sentence, word_lists, popular_words, doc_tag, prev_sentiment)\n\n\tprob = math.log(float(sent_counts[sentiment]) / float(sent_counts['total']))\n\n\tfor feature in features:\n\t\tif feature not in vectors[sentiment]:\n\t\t\tfeature = \"\"\n\n\t\tfeature_count = vectors[sentiment][feature]\n\n\t\tprob += math.log(float(feature_count + 1) / float(sent_counts[sentiment] + len(vectors[sentiment])))\n\n\treturn prob\n","sub_path":"likelihood.py","file_name":"likelihood.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"163558645","text":"import keras\nimport numpy as np\nfrom keras.layers import *\nfrom keras.models import Sequential\n\nbatch_size = 100\nepochs = 3\n\nprint('Loading data...')\nwith np.load(\"./NSL_KDD.npz\") as f:\n x_train, y_train, x_test, y_test = f[\"x_train\"], f[\"y_train\"], f[\"x_test\"], f[\"y_test\"]\nx_train = x_train.astype(\"float32\")\nx_test = x_test.astype(\"float32\")\nprint(len(x_train), 'train sequences')\nprint(len(x_test), 'test sequences')\n\ny_train = keras.utils.to_categorical(y_train)\ny_test = keras.utils.to_categorical(y_test)\nprint('y_train shape:', y_train.shape)\nprint('y_test shape:', y_test.shape)\n\nprint('Building model...')\nmodel = Sequential()\nmodel.add(Dense(512, activation='relu', input_shape=(41,)))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(2, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\nmodel.summary()\nverify_with_test_data = False # whether to validate on the test set during training\nif verify_with_test_data:\n print(\"verify with test data\")\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=2,\n validation_data=(x_test, y_test))\nelse:\n print(\"verify with a split of the training data\")\n model.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=2,\n validation_split=0.8)\n","sub_path":"NSL_KDD/nsl_kdd_mlp.py","file_name":"nsl_kdd_mlp.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"238216322","text":"import requests\nimport simplejson\nimport json\nimport time\nimport sqlite3\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\nconn = sqlite3.connect('LOL.db',check_same_thread=False)\ncur = conn.cursor()\ncur.execute('''DROP TABLE IF EXISTS LOL''')\ncur.execute('''CREATE TABLE LOL (\nteamname1 TEXT,\nteamname2 TEXT,\nkills1 INTEGER ,\nkills2 INTEGER ,\nbaronkills1 INTEGER,\nbaronkills2 INTEGER ,\ndeath1 INTEGER ,\ndeath2 INTEGER ,\ndragonkills1 INTEGER ,\ndragonkills2 INTEGER ,\ngolds1 
INTEGER,\ngolds2 INTEGER,\nassists1 INTEGER ,\nassists2 INTEGER ,\ntowerkills1 INTEGER ,\ntowerkills2 INTEGER ,\nscheduleid INTEGER\n)''')\n\n\n\nurls = []\nfor k in range(1,5000):\n address = 'http://www.wanplus.com/ajax/matchdetail/' + str(k) + '?_gtk='\n urls.append(address)\n\ndef get_pages(url):\n try:\n cookies = {'Cookie':'isShown=1; gameType=2; Hm_lvt_f69cb5ec253c6012b2aa449fb925c1c2=1492760839,1492776021,1492829141; Hm_lpvt_f69cb5ec253c6012b2aa449fb925c1c2=1492829860; wanplus_token=57293e3cf97c12f47ddb2804d60b9da6; wanplus_storage=l%2F0ntrL0OCyiKxm9zzaRyOrPVaDn%2FHeTJcAxhQX2tZbm54TvxPOBGXcz1N5tG%2BhRKLE9zwNsxT4hSYMvwI%2F04tizp3esiOFnuaiefVSDLvRzzmvP%2BvZn22cC3Fqn%2F%2BA9PKF7kQl78j9o6LSGsrttLL4dS8c5Ik56n1BeZpLB8cGJYd%2BOU8IYXYvdns9pASeH9XtTxNUyx1enZbAmS2Zd0cX6Ccx02vOWC9x1n8oawImKHRj6Vtg8EHobUOMgRbOo1PhzUP5VmW0xKkwBCsXJcgPQyjAuo%2BNxUbihVKub4EzEarKBN3hu0Ky121%2FUouR8o7%2BAmNFwC9yGssM%2BOA%2FfU5H2Ah3qZLz4%2FRp3wOU; wanplus_sid=717f86ffc5006098b35675c51acb4003; wanplus_csrf=_csrf_tk_2005064935'}\n headers = {'Headers':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36','X-Requested-With':'XMLHttpRequest'}\n response = requests.get(url, cookies = cookies, headers=headers,timeout= 5)\n response.encoding = 'utf-8'\n response = response.text\n dict_json = simplejson.loads(response)\n item_json = json.dumps(dict_json, indent=2)\n review_json = json.loads(item_json)\n #print(review_json)\n if 'dragonkills' in review_json['data']['teamStatsList']:\n teamname1 = (review_json['data']['info']['oneteam']['teamalias'])\n teamname2 = (review_json['data']['info']['twoteam']['teamalias'])\n kills1 = (review_json['data']['teamStatsList']['kills'][0])\n kills2 = (review_json['data']['teamStatsList']['kills'][1])\n baronkills1 = (review_json['data']['teamStatsList']['baronkills'][0])\n baronkills2 = (review_json['data']['teamStatsList']['baronkills'][1])\n death1 = (review_json['data']['teamStatsList']['deaths'][0])\n death2 = (review_json['data']['teamStatsList']['deaths'][1])\n dragonkills1 = (review_json['data']['teamStatsList']['dragonkills'][0])\n dragonkills2 = (review_json['data']['teamStatsList']['dragonkills'][1])\n golds1 = (review_json['data']['teamStatsList']['golds'][0])\n golds2 = (review_json['data']['teamStatsList']['golds'][1])\n assists1 = (review_json['data']['teamStatsList']['assists'][0])\n assists2 = (review_json['data']['teamStatsList']['assists'][1])\n towerkills1 = (review_json['data']['teamStatsList']['towerkills'][0])\n towerkills2 = (review_json['data']['teamStatsList']['towerkills'][1])\n scheduleid = (review_json['data']['plList'][0]['1']['scheduleid'])\n conn = sqlite3.connect('LOL.db', check_same_thread=False)\n cur = conn.cursor()\n cur.execute(\n '''INSERT OR IGNORE INTO LOL(teamname1,teamname2,kills1,kills2,baronkills1,baronkills2,death1,death2,dragonkills1,dragonkills2,golds1,golds2,assists1,assists2,towerkills1,towerkills2,scheduleid) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',\n (teamname1, teamname2, kills1, kills2, baronkills1, baronkills2, death1, death2, dragonkills1,dragonkills2, golds1, golds2, assists1, assists2, towerkills1, towerkills2,scheduleid))\n conn.commit()\n # print(golds1)\n print(url)\n else:\n print('do not belon to LOL',url)\n except:\n print('url do not exist',url)\n\n\npool = ThreadPool(8)\nresult = 
pool.map(get_pages,urls)\n\npool.close()\npool.join()\n","sub_path":"详细数据爬取.py","file_name":"详细数据爬取.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"442396041","text":"from unittest import TestCase\n\nfrom io import StringIO\nimport sys\n\n\nclass T(TestCase):\n def test0counter(self):\n from ..一年生 import force_locale,\\\n _ロボットやサン as a, みせ, もんだいロボット, しゅるい, かずロボット\n force_locale()\n b = a . しゅるい()\n c = a . てんいんサンをよんでください('もんだいロボット') . ロボットをください('むげん - むげん')\n d = c . しゅるい()\n e = c . こたえだけおしえてください()\n f = e.__class__\n sio, stdout = StringIO(), sys.stdout\n try:\n sys.stdout = sio\n e . しゃべってください()\n c . せつめいしてください()\n a . てんいんサンをよんでください('もんだいロボット') . ロボットをください('6 - むげん') . せつめいしてください()\n finally:\n sys.stdout = stdout\n g = e . しゅるい()\n h = g . しゅるい()\n i = h . しゅるい()\n j = sio.getvalue()\n expected = (True,) * 10\n s0 = 'かずではない\\nむげん - むげん = かずではない\\n6 - むげん = -むげん\\n'\n actual = (\n b is みせ,\n c == 'むげん - むげん',\n d is もんだいロボット,\n f is g is かずロボット, # hard to represent NaN literal.\n h is i is しゅるい,\n j == s0,\n )\n types = b, d, f, h\n s0types = map(str, types)\n s1types = (y.str() for y in types)\n s2types = r'みせ もんだいロボット かずロボット しゅるい'.split()\n z = zip(s0types, s1types, s2types)\n additional = (a is b == c for a, b, c in z)\n actual += tuple(additional)\n self.assertEqual(expected, actual)\n\n def test1deadend(self):\n from ..一年生 import force_locale, いきどまり\n force_locale()\n sio, stderr = StringIO(), sys.stderr\n try:\n sys.stderr = sio\n a = いきどまり . x() . x()\n b = いきどまり . type()\n c = b . type()\n d = いきどまり . str()\n finally:\n sys.stderr = stderr\n s0 = 'わかりません x () {}\\nわかりません x () {}\\n'\n e = sio.getvalue()\n expected = (True,) * 3\n actual = a is b is c is いきどまり, d == r'いきどまり', e == s0\n self.assertEqual(expected, actual)\n","sub_path":"a2019_02_28/module/test/一年生.py","file_name":"一年生.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"602166123","text":"import logging\nimport sys\nimport time\nfrom datetime import datetime, timedelta\n\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\nfrom config import USER, PASS, LOGIN_URL\nfrom db_helper import get_session, WeatherStation, MeasurementsDaily\nfrom utils import datestring_to_date\n\n\ndef login():\n browser = webdriver.PhantomJS()\n browser.get(LOGIN_URL)\n email_element = browser.find_element_by_name(\"mCod\")\n email_element.send_keys(USER)\n time.sleep(1)\n pass_element = browser.find_element_by_name(\"mSenha\")\n pass_element.send_keys(PASS)\n time.sleep(1)\n pass_element.submit()\n return browser\n\n\ndef crawl_data():\n logging.basicConfig(filename='crawler.log', level=logging.INFO)\n\n browser = login()\n session = get_session()\n session = session()\n\n ws = session.query(WeatherStation).all()\n\n for w in ws:\n\n logging.info('{} {}: Crawling... 
'.format(w.omm, w.name))\n\n start = \"01/01/1961\"\n end = datetime.now() - timedelta(days=90)\n end = end.strftime(\"%d/%m/%Y\")\n code_ws = str(w.omm)\n\n url = \"http://www.inmet.gov.br/projetos/rede/pesquisa/gera_serie_txt.php?&mRelEstacao=\" + \\\n code_ws + \"&btnProcesso=serie&mRelDtInicio=\" + start + \"&mRelDtFim=\" + \\\n end + \"&mAtributos=1,1,,,1,1,,1,1,,,1,,,,,\"\n\n browser.get(url)\n soup = BeautifulSoup(browser.page_source, \"html.parser\")\n\n try:\n for pre in soup.find('pre'):\n rows = pre.string.splitlines()\n for r in rows:\n if r.startswith(code_ws):\n\n try:\n dt = datestring_to_date(r.split(';')[1])\n except ValueError:\n dt = None\n\n try:\n hour_utc = r.split(';')[2]\n hour = int(int(hour_utc) / 100)\n hour_utc_td = timedelta(hours=hour)\n except ValueError:\n hour_utc_td = None\n\n hour_utc = r.split(';')[2]\n hour = int(int(hour_utc) / 100)\n dt_complete = datetime(year=dt.year, month=dt.month, day=dt.day, hour=hour)\n\n try:\n db = float(r.split(';')[3])\n except ValueError:\n db = None\n\n try:\n wb = float(r.split(';')[4])\n except ValueError:\n wb = None\n\n try:\n h = float(r.split(';')[5])\n except ValueError:\n h = None\n\n try:\n p = float(r.split(';')[6])\n except ValueError:\n p = None\n\n try:\n wd = int(r.split(';')[7])\n except ValueError:\n wd = 0\n\n try:\n ws = float(r.split(';')[8])\n except ValueError:\n ws = None\n\n try:\n c = int(r.split(';')[9])\n except ValueError:\n c = None\n\n try:\n md = MeasurementsDaily()\n md.weather_station_id = w.id\n md.measure_date_complete = dt_complete\n md.measure_date = dt\n md.utf_hour = hour_utc_td\n md.temp_dry_bulb = db\n md.temp_wet_bulb = wb\n md.humidity = h\n md.level_pressure_on_station = p\n md.wind_direction = wd\n md.wind_speed = ws\n md.cloudiness = c\n\n session.add(md)\n\n except:\n logging.info(r)\n logging.info(sys.exc_info()[0])\n\n session.commit()\n session.close()\n\n except TypeError:\n logging.info('{} {}: End of this stations ... '.format(w.omm, w.name))\n logging.info('Html:{}'.format(soup.prettify()))\n\n logging.info('{} {}: End of this station ... 
'.format(w.omm, w.name))\n","sub_path":"crawler_data.py","file_name":"crawler_data.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190798189","text":"from PyQt5.QtWidgets import *\n\napp = QApplication([])\nwin = QWidget()\n\nform = QFormLayout()\nwin.setLayout(form)\n\nformEdit1 = QLineEdit()\nform.addRow(\"name\", formEdit1)\n\nformEdit2 = QLineEdit()\nformButton = QPushButton(\"나이 확인\")\n\nformLayout = QHBoxLayout()\nformLayout.addWidget(formEdit2)\nformLayout.addWidget(formButton)\nform.addRow(\"age\", formLayout)\n\nformLabel = QLabel(\"경고 : 나이가 너무 많습니다.\")\nform.addWidget(formLabel)\n\nformLabel.setVisible(False)\nformButton2 = QPushButton(\"회원가입\")\nform.addRow(formButton2)\n\ndef checkAge():\n age = formEdit2.text()\n if age.isdigit() == False: return\n age = int(age)\n \n if age >= 25: formLabel.setVisible(True)\n else: formLabel.setVisible(False)\n\ndef checkName():\n name = formEdit1.text()\n n = len(name)\n if 1<= n <= 4 : pass\n else:\n msg = QMessageBox()\n msg.setText(\"이름은 1 ~ 4 글자 필수\")\n msg.exec()\n \nformButton.clicked.connect(checkAge)\nformButton2.clicked.connect(checkName)\n\napp.setApplicationName(\"MY WORLD\")\n\nwin.show()\napp.exec()","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"428098254","text":"# -*- coding: utf-8 -*-\nimport requests, scrapy\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nfrom scrapy.http import Request\nfrom scrapy.spider import BaseSpider\n\nfrom news.items import NewsItem\n\n\n\nclass ScrapeWorldnewsSpider(scrapy.Spider):\n\tname = \"scrape_worldnews\"\n\tallowed_domains = [\"news.163.com\"]\n\tstart_urls = ['http://news.163.com/']\n\n\n\tdef parse(self, response):\n\t\thtml = BeautifulSoup(response.body, \"html.parser\")\n\n\n\t\tprint(html.find(\"a\", {'href': \"http://news.163.com/world/\"})['href'])\n\n\t\tif html.find(\"a\", {'href': \"http://news.163.com/world/\"}):\n\t\t\tyield Request(html.find(\"a\", {'href': \"http://news.163.com/domestic/\"})['href'], callback=self.parse_worldnews)\n\n\tdef parse_worldnews(self, response):\n\t\thtml = BeautifulSoup(response.body, \"html.parser\")\n\n\t\tsite_callback_url = \"http://temp.163.com/special/00804KVA/cm_guoji_02.js?callback=data_callback\"\n\t\tpage_num = \"\"\n\t\tfetch = requests.get(site_callback_url)\n\t\tfetch_json = eval(fetch.text[14:-1])\n\n\t\tfor data in fetch_json:\n\t\t\titem = NewsItem()\n\t\t\titem['category'] = 'world'\n\t\t\titem['title'] = data['title']\n\t\t\titem['date'] = datetime.strptime(data['time'], \"%m/%d/%Y %H:%M:%S\").strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t\titem['url'] = data['docurl']\n\t\t\titem['article'] = \"\"\n\t\t\tarticle_response = requests.get(item['url'])\n\t\t\tif article_response.status_code == 200:\n\t\t\t\tarticle_html = BeautifulSoup(article_response.text, \"html.parser\")\n\t\t\t\tif article_html.find(\"div\", {'id': \"endText\"}):\n\t\t\t\t\tif article_html.find(\"div\", {'id': \"endText\"}).findAll(\"p\", attrs={\"class\": None}):\n\t\t\t\t\t\tfor p in article_html.find(\"div\", {'id': \"endText\"}).findAll(\"p\", attrs={\"class\": None}):\n\t\t\t\t\t\t\tif p.string is not None:\n\t\t\t\t\t\t\t\titem['article'] += \"{}\\n\\n\".format(p.string)\n\n\t\t\tyield 
item","sub_path":"news/news/spiders/scrape_worldnews.py","file_name":"scrape_worldnews.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"174218941","text":"import httplib, time\ndef processOnePage(data1):\n\tresults = []\n\titems = data1.split('
')\n\tfor i in range(1, len(items) - 1):\n\t\t#StationName = items[i].split('
\t\t\t\t
')[0].strip()\n\t\tStationID = items[i].split(' \t')[0]\n\t\t#print StationID\n\t\tresults.append(StationID)\n\t\t#conn = httplib.HTTPConnection(\"climate.weather.gc.ca\")\n\t\t#conn.request(\"GET\", \"/climateData/bulkdata_e.html?format=csv&stationID=\" + StationID + \"&Year=2014&Month=1&Day=1&timeframe=2&submit=Download+Data\")\n\t\t#f = open (StationID + \".txt\",\"w\")\n\t\t#f.write(conn.getresponse().read() + \"\\n\")\n\t\t#f.close()\t\t\n\t\t'''hlyRange = items[i].split(' \t\t \t \tLatitude:')[1].split('Longitude:')[0]\n\t\t\tlatitude = latitude.split('')[1].split('')[0]\n\t\t\tdegree = float(latitude.split('°')[0])\n\t\t\tminute = float(latitude.split('°')[1].split('\\'')[0])\n\t\t\tsecond = float(latitude.split('\\'')[1].split('"')[0])\n\t\t\tlatitude = str(degree + minute/60.0 + second/3600.0)\n\t\t\tlongitude = data1.split('Longitude:')[1].split('Elevation:')[0]\n\t\t\tlongitude = longitude.split('')[1].split('')[0]\t\n\t\t\tdegree = float(longitude.split('°')[0])\n\t\t\tminute = float(longitude.split('°')[1].split('\\'')[0])\n\t\t\tsecond = float(longitude.split('\\'')[1].split('"')[0])\n\t\t\tlongitude = str(-(degree + minute/60.0 + second/3600.0))\n\t\t\televation = data1.split('Elevation:')[1].split('Climate ID:')[0]\n\t\t\televation = elevation.split('')[1].split('m')[0]\n\t\t\tclimateID = data1.split('Climate ID:')[1].split('WMO ID:')[0]\n\t\t\tclimateID = climateID.split('')[1].split('')[0]\n\t\t\tWMOID = data1.split('WMO ID:')[1].split('TC ID:')[0]\n\t\t\tWMOID = WMOID.split('')[1].split('')[0]\n\t\t\tTCID = data1.split('TC ID:')[1].split('')[0]\n\t\t\tTCID = TCID.split('')[1].split('')[0]\n\t\t\tresults.append([StationName, StationID, hlyRange, dlyRange, mlyRange, latitude, longitude, elevation, climateID, WMOID, TCID])\n\t\texcept ValueError:\n\t\t\tresults.append([StationName, StationID, hlyRange, dlyRange, mlyRange])\n\t\t'''\n\t\t#print StationName + \"\\t\" + StationID + \"\\t\" + hlyRange + \"\\t\" + dlyRange + \"\\t\" + mlyRange + \"\\t\" + latitude + \"\\t\" + longitude + \"\\t\" + elevation + \"\\t\" + climateID + \"\\t\" + WMOID + \"\\t\" + TCID\n\t\t#time.sleep(5)\n\treturn results\n\n# http://climate.weather.gc.ca/advanceSearch/searchHistoricDataStations_e.html?searchType=stnProv&timeframe=1&lstProvince=ONT&optLimit=yearRange&StartYear=1840&EndYear=2013&Year=2013&Month=9&Day=25&selRowPerPage=100&cmdProvSubmit=Search\n# http://climate.weather.gc.ca/advanceSearch/searchHistoricDataStations_e.html?searchType=stnProv&timeframe=1&lstProvince=ONT&optLimit=yearRange&StartYear=1840&EndYear=2013&Year=2013&Month=9&Day=27&selRowPerPage=100&cmdProvSubmit=Search&startRow=201\n#conn = httplib.HTTPConnection(\"lrcdrrvsdvap002\")\n#conn.request(\"GET\", \"/web/page1.htm\")\nconn = httplib.HTTPConnection(\"climate.weather.gc.ca\")\nconn.request(\"GET\", \"/advanceSearch/searchHistoricDataStations_e.html?searchType=stnProv&timeframe=1&lstProvince=ONT&optLimit=yearRange&StartYear=1840&EndYear=2013&Year=2013&Month=9&Day=25&selRowPerPage=100&cmdProvSubmit=Search\")\nr1 = conn.getresponse()\ndata1 = r1.read()\nrecordNumber = int(data1.split('locations match your customized search. Confirm the')[0].split('to share your comments and suggestions.
')[1])\ntime.sleep(5)\nresults = processOnePage(data1)\nstartRow = 99\nwhile (startRow < recordNumber):\n\tconn = httplib.HTTPConnection(\"climate.weather.gc.ca\")\n\tconn.request(\"GET\", \"/advanceSearch/searchHistoricDataStations_e.html?searchType=stnProv&timeframe=1&lstProvince=ONT&optLimit=yearRange&StartYear=1840&EndYear=2013&Year=2013&Month=9&Day=25&selRowPerPage=100&cmdProvSubmit=Search&startRow=\" + str(startRow))\n\tresults = results + processOnePage(conn.getresponse().read())\n\ttime.sleep(2)\n\tstartRow = startRow + 100\nf = open (\"weather.txt\",\"w\")\n#f.write(\"\\t\".join([\"StationName\", \"StationID\", \"hlyRange\", \"dlyRange\", \"mlyRange\", \"latitude\", \"longitude\", \"elevation\", \"climateID\", \"WMOID\", \"TCID\"]) + \"\\n\")\n#for result in results:\n\t#print \"\\t\".join(result)\n#\tf.write(\"\\t\".join(result) + \"\\n\")\nf.write(\"\\n\".join(results))\nf.close()\n#print len(items)","sub_path":"Step1.py","file_name":"Step1.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"26722143","text":"\ndef graflinea(p1, p2,ax):\n xx = []\n yy = []\n zz = []\n \n x1 = p1.punto.x\n y1 = p1.punto.y\n z1 = p1.punto.z\n \n x2 = p2.destino.punto.x\n y2 = p2.destino.punto.y\n z2 = p2.destino.punto.z\n \n xx.append(x1)\n xx.append(x2)\n \n yy.append(y1)\n yy.append(y2)\n \n zz.append(z1)\n zz.append(z2)\n \n ax.text((x1+x2)/2, (y1+y2)/2, (z1+z2)/2, p2.peso, 'x')\n \n ax.plot(xx,yy,zz)\n \ndef graficar(grafo):\n nodos = grafo.nodos\n graf(nodos)\n pyplot.show()\n \ndef graf(nodos):\n \n fig = pyplot.figure()\n ax = Axes3D(fig)\n \n for nod in nodos:\n x = nod.punto.x\n y = nod.punto.y\n z = nod.punto.z\n \n if (nod.ady > 0):\n for nn in nod.adyacentes:\n graflinea(nod,nn,ax)\n \n ax.scatter(x,y,z)\n ax.text(x, y, z, nod.nombre, 'x')\n\ndef graficarDijkstra(grafo,nombreNodo):\n recorrido = grafo.dijkstra(nombreNodo)\n nodos = grafo.nodos\n grafDij(recorrido,nodos)\n graf(nodos)\n pyplot.show()\n\ndef grafDij(recorrido,nodos):\n nodos = grafo.nodos\n \n fig = pyplot.figure()\n ax = Axes3D(fig)\n \n for nod in nodos:\n x = nod.punto.x\n y = nod.punto.y\n z = nod.punto.z\n \n if (nod.ady > 0):\n for nn in nod.adyacentes:\n if(presente2(recorrido,nod.nombre,nn.destino.nombre)):\n graflinea(nod,nn,ax)\n ax.scatter(x,y,z)\n ax.text(x, y, z, nod.nombre, 'x')\n\ndef conseguirNodo(nodos,nombre):\n for nod in nodos:\n if(nod.nombre == nombre):\n return nod\n\ndef presente(lista,valor):\n for nn in lista:\n if (nn == valor):\n return True\n return False\n \ndef presente(lista,destino):\n for nn in lista:\n adD = nn.destino\n if ( adD == destino ):\n return True\n return False\n \ndef presente2(lista,origen,destino):\n for nn in lista:\n adO = nn.origen\n adD = nn.destino\n if ( adD == destino and adO == origen ):\n return True\n return False\n\nclass punto:\n x = int()\n y = int()\n z = int()\n \n def __init__(self,x,y,z):\n self.x = x\n self.y = y\n self.z = z\n\nclass Nodo:\n def __init__(self,nom,x,y,z):\n self.nombre = nom\n self.punto = punto(x,y,z)\n self.ady = 0\n self.adyacentes = []\n \n def __eq__(self,other):\n return self.nombre == other.nombre\n \n def darNombre(self):\n return self.nombre\n \n def agregarAdyacente(self,nodoDest,peso):\n \n cc = 0\n \n if (nodoDest.nombre != self.nombre):\n for ads in self.adyacentes: # se asegura que no haya una relacion igual \n if ( nodoDest.nombre == ads.nombre):\n cc = 1\n \n if(cc == 0): # en caso de que no, inserta la relacion\n adyac = 
Adyacente(nodoDest,peso)\n self.adyacentes.append(adyac)\n self.ady += 1\n \nclass Adyacente:\n destino = Nodo(\" \",0,0,0)\n peso = float()\n def __init__(self,dest,peso):\n self.destino = dest\n self.peso = peso\n self.rec = False\n\n def __eq__(self,other):\n return self.destino == other.destino\n \n def nombre(self):\n return self.destino.nombre\n\nclass Grafo:\n nodos = []\n \n def insertarNodo(self,nomb,xpos,ypos,zpos):\n nod = Nodo(nomb,xpos,ypos,zpos)\n self.nodos.append(nod) \n \n def ingresarNuevaRelacion(self,Na,Nb,peso):\n \n ccori = False\n ccdest= False\n \n if (Na != Nb):\n \n for nod in self.nodos: # busca que esten en la lista de nodos\n if(nod.nombre == Na):\n ccori = True\n nodoOri = nod\n if(nod.nombre == Nb):\n ccdest = True\n nodoDest = nod\n \n if ( ccori & ccdest ): # en caso de estar ambos nodos\n # se ingresa un relacion bidireccional\n nodoOri.agregarAdyacente(nodoDest,peso)\n nodoDest.agregarAdyacente(nodoOri,peso)\n \n def dijkstra(self,nnIni):\n \n cc = False\n for nn in self.nodos:\n if (nn.nombre == nnIni):\n nodo = nn\n cc = True\n \n if (cc):\n recorrido = [] # lista de adyacencias\n \n recorrido.append(Link(nodo.nombre,nodo.nombre,0,0))\n \n self.recdij(Adyacente(nodo,0),recorrido,0)\n \n print(\"-------------------------------\")\n for rec in recorrido:\n print(\" \",rec.origen, \" - \",rec.destino ,\" con peso de la relacion: \",rec.peso, \" con peso total :\",rec.pesoTotal,\"\\n\")\n \n return recorrido\n \n def recdij(self,nodo,recorrido,pesoAnt): # tal vez podria crear un nodo nuevo con solo la ruta optima\n \n for nod in nodo.destino.adyacentes:\n \n nnori = nodo.destino.nombre\n nndes = nod.destino.nombre\n peso = nod.peso\n cambio = False\n print(\"------------------------------------\")\n print(nnori ,\" \" , nndes , \" \", peso , \" \" ,peso+pesoAnt)\n print(\"----------\")\n if (nnori != nndes):\n if (presente(recorrido,nod.nombre()) ): \n for nn in recorrido:\n print(nn.origen,\" \" , nn.destino,\" \" , nn.peso,\" \" , nn.pesoTotal)\n if ( nndes == nn.destino ):\n \n if (nn.pesoTotal > nod.peso+pesoAnt):\n print(\"cambio\")\n nn.cambiar(nnori,nndes,peso,peso+pesoAnt)\n cambio = True\n #al hacer una cambio, poner todos los hijos como no visitados\n #\n \n if (cambio):\n cambio = False\n \n for nnn in nod.destino.adyacentes:\n nnn.rec = False\n self.recdij(nod,recorrido,nod.peso+pesoAnt)\n \n elif(not nod.rec):\n nod.rec = True\n print(\"ingresado\")\n recorrido.append(Link(nnori,nndes,peso,peso+pesoAnt))\n self.recdij(nod,recorrido,nod.peso+pesoAnt)\n nod.rec = False\n \n #se visita cada nodo y se hacen todo tipo de cositas chistosas... 
tengo sueño\n \n def imprimirRelaciones(self):\n \n for nod in self.nodos:\n print(nod.nombre)\n print(\"----\")\n for nn in nod.adyacentes:\n print(\" \",nn.nombre(),\":\",nn.peso)\n print(\"--------\")\n def ingresar(self,nomb):\n xpos = random.randint(1,11)\n ypos = random.randint(1,11)\n zpos = random.randint(1,11)\n grafo.insertarNodo(nomb,xpos,ypos,zpos);\n \nclass Link:\n \n def __init__(self,ori,dest,peso,pesoT):\n self.origen = ori # and isinstance(ori, )\n self.destino = dest# and isinstance(ori, dest)\n self.peso = peso\n self.pesoTotal = pesoT\n \n def darNombreO(self):\n return self.origen.nombre\n def darNombreD(self):\n return self.destino.nombre\n \n def cambiar(self,ori,dest,peso,pesoT):\n self.origen = ori\n self.destino = dest\n self.peso = peso\n self.pesoTotal = pesoT\n\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport random\nimport numpy as np\nimport random\nimport math\n\ngrafo = Grafo()\ndesde = \"a\"\n#'''\n# para la entrega oficial\ncanp=0\ncanp = int(input(\"ingresar cantidad de puntos: \"))\nfor a in range(canp):\n nomb = input(\"punto : \")\n \n grafo.ingresar(nomb);\n \nins = 1;\nwhile (ins == 1):\n #int Na,Nb;\n #float peso;\n \n \n Na = input(\"origen : \")\n Nb = input(\"destino : \")\n peso = int(input(\"peso : \"))\n \n grafo.ingresarNuevaRelacion(Na,Nb,peso)\n sig = input(\"seguir? (y/n) : \")\n if(sig != 'y'):\n ins = 0 ;\n'''\n# aint nobody got time for dat\n\nnomb = \"a\"\ngrafo.ingresar(nomb);\nnomb = \"b\"\ngrafo.ingresar(nomb);\nnomb = \"c\"\ngrafo.ingresar(nomb);\nnomb = \"d\"\ngrafo.ingresar(nomb);\nnomb = \"e\"\ngrafo.ingresar(nomb);\nnomb = \"f\"\ngrafo.ingresar(nomb);\n\n\ngrafo.ingresarNuevaRelacion('a','b',6)\ngrafo.ingresarNuevaRelacion('a','c',4)\n\ngrafo.ingresarNuevaRelacion('b','c',3)\n\ngrafo.ingresarNuevaRelacion('b','d',2)\ngrafo.ingresarNuevaRelacion('c','e',9)\n\ngrafo.ingresarNuevaRelacion('d','e',1)\n\ngrafo.ingresarNuevaRelacion('d','f',2)\ngrafo.ingresarNuevaRelacion('e','f',6)\ngrafo.imprimirRelaciones()\n\n#'''\n \n#graficar(grafo)\n\n \n#grafo.dijkstra(\"a\")\n \n\n#input()\ngraficarDijkstra(grafo,desde)","sub_path":"redes/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":9046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"288432811","text":"# -*- coding:utf-8 -*-\n# @Time: 2020/7/10 8:31 上午\n# @Author: duiya duiyady@163.com\n\n\n\"\"\"\n一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。\n机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。\n问总共有多少条不同的路径?\n\"\"\"\n\ndef uniquePaths(m, n):\n tmp = [[0 for _ in range(n)] for _ in range(m)]\n\n def fun(i, j, count):\n if i == m-1 and j == n-1:\n return count + 1\n else:\n if tmp[i][j] != 0:\n return count + tmp[i][j] - 1\n else:\n t_count = count\n if j+1 < n:\n t_count = fun(i, j+1, t_count)\n if i+1 < m:\n t_count = fun(i+1, j, t_count)\n tmp[i][j] = t_count - count + 1\n return t_count\n return fun(0, 0, 0)\n\n\nif __name__ == '__main__':\n print(uniquePaths(3, 2))\n\n\n","sub_path":"src/main/num001_100/62_不同路径.py","file_name":"62_不同路径.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"398691494","text":"# Version final\n#para regenerar\n#pyinstaller --windowed --onefile --add-data \"E:\\aprendiendo pyton\\nuevo proyecto\\graphics\\zombie.ic\nfrom tkinter import *\nfrom tkinter import ttk\nimport sys\nimport os\nimport time\n\n# para usar archivos guardados en el exe\ndef 
resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\n# Ventana\nclass Root(Tk):\n\n def __init__(self):\n super(Root,self).__init__()\n self.title(\"Zombify\")\n self.minsize(564,260)\n self.iconbitmap(resource_path('zombie.ico'))\n self.resizable(width=False, height=False)\n\n# Entrys numericos\nclass ParametroNumerico(Entry):\n\n def __init__(self,parent):\n super().__init__(parent)\n reg =root.register(correctn)\n #selfEntry(root)\n self.config(width = 2, validate='key',validatecommand=(reg,'%P'))\n self.Entryval = StringVar()\n self.config(textvariable = self.Entryval)\n\n# conversion de monstruo\ndef convert():\n stro = int(strEntry.Entryval.get())\n dexo = int(dexEntry.Entryval.get())\n chan = 10\n hdo = int(hdEntry.Entryval.get())\n nombre = nombreEntryval.get()\n hdn=1\n indice= size.index(sizeComboval.get())\n #print(indice)\n if undeadComboval.get() ==\"Skeleton\" or undeadComboval.get() ==\"Bloody Skeleton\" :\n #print(\"esqueleto\")\n #hd\n hdn = hdo\n bonifdh =''\n ACn = sac[indice]\n Attack = sclaw[indice]\n # STR\n strn = stro\n # DEX\n dexn = dexo +2\n # AC\n acn = 10 + (dexn-10)//2 + sac[indice]\n # attack\n attack = f'Claw {sclaw[indice]}+{(strn-10)//2}'\n # INI\n inin = (dexn-10)//2+4\n # DAMAGE REDUCTION\n damred = '5/bludgeoning'\n if undeadComboval.get() ==\"Bloody Skeleton\":\n chan = chan + 4\n damred = f'''5/bludgeoning\n Deathless (Su)\n Fasthealing {hdn//2}'''\n hpextra = hdn*((chan-10)//2)+(hdn*desecrate.get())\n\n\n elif undeadComboval.get()==\"Zombie\" or undeadComboval.get()==\"Fast Zombie\":\n # print(\"zombie\")\n # HD\n hdn = hdo + zhd[indice]\n hpextra = 3 +(hdn*desecrate.get())\n if hdn > 3:\n hpextra = hdn +(hdn*desecrate.get())\n # STR\n strn = stro +2\n # DEX\n dexn = dexo -2\n # DAMAGE REDUCTION\n damred = '''5/slashing\n Staggered (Ex)'''\n if undeadComboval.get()==\"Fast Zombie\":\n dexn = dexo +2\n damred = 'Quick Strikes (Ex)'\n # AC\n acn = 10 + (dexn-10)//2 + zac[indice]\n # attack\n attack = f'Slam {zslam[indice]}+{str((strn-10)//2) }'\n # INI\n inin = (dexn-10)//2\n\n #hpextra\n bonifdh = ''\n if hpextra !=0:\n bonifdh = f'+{hpextra}'\n # BAB\n babn = hdn*3//4\n # CHA\n #chan = 10\n # FORT\n forn = (hdn*1)//3 + (chan-10)//2\n # REFL\n refn = (hdn*1)//3 + (dexn-10)//2\n # WILL\n wiln = (hdn*1)//2+2\n\n resultado = f'''Nombre: {undeadComboval.get()} {nombre}\nSize: {sizeComboval.get()}\nHD: {hdn}d8 {bonifdh}\nBAB {babn}\nSTR: {strn}\nDEX: {dexn}\nCHA: {chan}\nAC: {acn}\nFORT: {forn}\nREFL: {refn}\nWILL: {wiln}\nINIT: {inin}\nAttack: {attack}\nSp: {damred}\n'''\n\n textocomentario=Text(root,width=50, height= 16)\n textocomentario.grid(row=0,column=2,rowspan= 9999)\n # print(resultado)\n textocomentario.insert(INSERT,resultado)\n textocomentario.config(state = 'disabled')\n\n# Para que el botton se hunda cuando presionas enter\ndef invoke_button(event):\n convertbutton.config(relief = \"sunken\")\n root.update_idletasks()\n time.sleep(0.1)\n convertbutton.invoke()\n convertbutton.config(relief = \"raised\")\n\n# def convert1(event):\n# convert()\n\n\n# Función para validar texto\ndef correctc(inp):\n if all(x.isalpha() or x.isspace() for x in inp) and len(inp)<30:\n return True\n else:\n return False\n\n\n# Función para validar numeros\ndef correctn(inp):\n if inp.isdigit() and 0\", 
convert1)\nroot.bind(\"\", invoke_button)\n# fin Graficos\n\n# somewhere the button is defined to do something when clicked\n#button_save = Button(text=\"Save\", command = self.doSomething)\n\n\n\n\n\n# somewhere else\n\n\n\n\n\n\nroot.mainloop()\n","sub_path":"zombiegIG.py","file_name":"zombiegIG.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"242415546","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom .models import Blog\n\ndef listQuestions(request):\n latest_blogs = Blog.objects.order_by('-created')[:5]\n context = {\n 'latest_blogs': latest_blogs,\n }\n return render(request,'blog/list.html',context)\n\ndef detail(request,blog_id):\n try:\n blog = Blog.objects.get(pk=blog_id)\n except Blog.DoesNotExist:\n raise Http404(\"Blog does not exist\")\n return render(request, 'blog/detail.html', {'blog': blog})","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"111702701","text":"import itertools\nfrom dataclasses import dataclass\n\nimport numpy as np\nimport pytest\n\nimport vpype as vp\nfrom vpype_cli import DebugData, cli, execute\n\nfrom .utils import TESTS_DIRECTORY, execute_single_line\n\nCM = 96 / 2.54\n\nEXAMPLE_SVG = TESTS_DIRECTORY / \"data\" / \"test_svg\" / \"svg_width_height\" / \"percent_size.svg\"\n\n\n@dataclass\nclass Command:\n command: str\n exit_code_no_layer: int\n exit_code_one_layer: int\n exit_code_two_layers: int\n\n\nMINIMAL_COMMANDS = [\n Command(\"begin grid 2 2 line 0 0 10 10 end\", 0, 0, 0),\n Command(\"begin repeat 2 line 0 0 10 10 end\", 0, 0, 0),\n Command(\"frame\", 0, 0, 0),\n Command(\"random\", 0, 0, 0),\n Command(\"line 0 0 1 1\", 0, 0, 0),\n Command(\"rect 0 0 1 1\", 0, 0, 0),\n Command(\"arc 0 0 1 1 0 90\", 0, 0, 0),\n Command(\"circle 0 0 1\", 0, 0, 0),\n Command(\"ellipse 0 0 2 4\", 0, 0, 0),\n Command(f\"read '{EXAMPLE_SVG}'\", 0, 0, 0),\n Command(f\"read -m '{EXAMPLE_SVG}'\", 0, 0, 0),\n Command(\"write -f svg -\", 0, 0, 0),\n Command(\"write -f hpgl -d hp7475a -p a4 -\", 0, 0, 0),\n Command(\"rotate 0\", 0, 0, 0),\n Command(\"scale 1 1\", 0, 0, 0),\n Command(\"scaleto 10cm 10cm\", 0, 0, 0),\n Command(\"skew 0 0\", 0, 0, 0),\n Command(\"translate 0 0\", 0, 0, 0),\n Command(\"crop 0 0 1 1\", 0, 0, 0),\n Command(\"linesort\", 0, 0, 0),\n Command(\"linesort --two-opt\", 0, 0, 0),\n Command(\"linemerge\", 0, 0, 0),\n Command(\"linesimplify\", 0, 0, 0),\n Command(\"multipass\", 0, 0, 0),\n Command(\"reloop\", 0, 0, 0),\n Command(\"lmove 1 new\", 0, 0, 0),\n Command(\"lcopy 1 new\", 0, 0, 0),\n Command(\"ldelete 1\", 0, 0, 0),\n Command(\"lswap 1 2\", 2, 2, 0),\n Command(\"lreverse 1\", 0, 0, 0),\n Command(\"line 0 0 10 10 lreverse 1\", 0, 0, 0),\n Command(\"random -l1 random -l2 lswap 1 2\", 0, 0, 0),\n Command(\"trim 1mm 1mm\", 0, 0, 0),\n Command(\"splitall\", 0, 0, 0),\n Command(\"filter --min-length 1mm\", 0, 0, 0),\n Command(\"pagesize 10inx15in\", 0, 0, 0),\n Command(\"stat\", 0, 0, 0),\n Command(\"snap 1\", 0, 0, 0),\n Command(\"reverse\", 0, 0, 0),\n Command(\"layout a4\", 0, 0, 0),\n Command(\"squiggles\", 0, 0, 0),\n Command(\"text 'hello wold'\", 0, 0, 0),\n]\n\n# noinspection SpellCheckingInspection\nLOREM = (\n \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor \"\n \"incididunt ut labore et dolore 
magna aliqua. Ut enim ad minim veniam, quis nostrud \"\n \"exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\"\n)\n\n\n@pytest.mark.parametrize(\"cmd\", MINIMAL_COMMANDS)\ndef test_commands_empty_geometry(runner, cmd):\n result = runner.invoke(cli, cmd.command, catch_exceptions=False)\n assert result.exit_code == cmd.exit_code_no_layer\n\n\n@pytest.mark.parametrize(\"cmd\", MINIMAL_COMMANDS)\ndef test_commands_single_line(runner, cmd):\n result = runner.invoke(cli, \"line 0 0 10 10 \" + cmd.command, catch_exceptions=False)\n assert result.exit_code == cmd.exit_code_one_layer\n\n\n@pytest.mark.parametrize(\"cmd\", MINIMAL_COMMANDS)\ndef test_commands_degenerate_line(runner, cmd):\n result = runner.invoke(cli, \"line 0 0 0 0 \" + cmd.command)\n assert result.exit_code == cmd.exit_code_one_layer\n\n\n@pytest.mark.parametrize(\"cmd\", MINIMAL_COMMANDS)\ndef test_commands_random_input(runner, cmd):\n result = runner.invoke(cli, \"random -n 100 \" + cmd.command)\n assert result.exit_code == cmd.exit_code_one_layer\n\n\n@pytest.mark.parametrize(\"args\", MINIMAL_COMMANDS)\ndef test_commands_execute(args):\n if args.exit_code_no_layer == 0:\n execute(args.command)\n\n\n@pytest.mark.parametrize(\"cmd\", MINIMAL_COMMANDS)\ndef test_commands_must_return_document(runner, cmd):\n @cli.command()\n @vp.global_processor\n def assertdoc(document):\n assert document is not None\n assert type(document) is vp.Document\n\n result = runner.invoke(cli, \"line 0 0 10 10 \" + cmd.command + \" assertdoc\")\n assert result.exit_code == cmd.exit_code_one_layer\n\n\n@pytest.mark.parametrize(\"cmd\", MINIMAL_COMMANDS)\ndef test_commands_keeps_page_size(runner, cmd):\n \"\"\"No command shall \"forget\" the current page size, unless its `pagesize` of course.\"\"\"\n\n args = cmd.command\n\n if args.split()[0] in [\"pagesize\", \"layout\"]:\n return\n\n page_size = None\n\n @cli.command()\n @vp.global_processor\n def getpagesize(doc: vp.Document) -> vp.Document:\n nonlocal page_size\n page_size = doc.page_size\n return doc\n\n result = runner.invoke(\n cli, \"random random -l2 pagesize --landscape 5432x4321 \" + args + \" getpagesize\"\n )\n assert result.exit_code == cmd.exit_code_two_layers\n assert page_size == (5432, 4321)\n\n\ndef test_frame(runner):\n result = runner.invoke(\n cli, \"random -n 100 -a 10cm 10cm dbsample frame dbsample frame -o 1cm dbsample dbdump\"\n )\n data = DebugData.load(result.output)\n\n assert result.exit_code == 0\n assert data[0].bounds == data[1].bounds\n assert data[2].count == data[1].count + 1 == data[0].count + 2\n\n\ndef test_random(runner):\n result = runner.invoke(cli, \"random -n 100 -a 10cm 10cm dbsample dbdump\")\n data = DebugData.load(result.output)[0]\n\n assert result.exit_code == 0\n assert data.count == 100\n assert data.bounds_within(0, 0, 10 * CM, 10 * CM)\n\n\ndef test_line(runner):\n result = runner.invoke(cli, \"line 0 0 10cm 10cm dbsample dbdump\")\n data = DebugData.load(result.output)[0]\n\n assert result.exit_code == 0\n assert data.count == 1\n assert data.bounds_within(0, 0, 10 * CM, 10 * CM)\n\n\ndef test_rect(runner):\n result = runner.invoke(cli, \"rect 0 0 10cm 10cm dbsample dbdump\")\n data = DebugData.load(result.output)[0]\n\n assert result.exit_code == 0\n assert data.count == 1\n assert data.bounds_within(0, 0, 10 * CM, 10 * CM)\n\n\ndef test_circle(runner):\n result = runner.invoke(cli, \"circle -q 0.5mm 0 0 10cm dbsample dbdump\")\n data = DebugData.load(result.output)[0]\n\n assert result.exit_code == 0\n assert 
data.bounds_within(-10 * CM, -10 * CM, 20 * CM, 20 * CM)\n assert data.count == 1\n\n\ndef test_grid(runner):\n result = runner.invoke(\n cli, \"begin grid -o 1cm 1cm 2 2 random -n 10 -a 1cm 1cm end dbsample dbdump\"\n )\n data = DebugData.load(result.output)\n\n assert result.exit_code == 0\n assert data[0].count == 40\n assert data[0].bounds_within(0, 0, 2 * CM, 2 * CM)\n\n\n@pytest.mark.parametrize(\"args\", [\"random -n 100 -a 10cm 10cm\"])\ndef test_write_read_identical(runner, args):\n with runner.isolated_filesystem():\n res1 = runner.invoke(cli, args + \" dbsample dbdump write output.svg\")\n assert res1.exit_code == 0\n res2 = runner.invoke(cli, \"read output.svg dbsample dbdump\")\n assert res2.exit_code == 0\n\n data1 = DebugData.load(res1.output)[0]\n data2 = DebugData.load(res2.output)[0]\n\n assert data1.count == data2.count\n assert np.isclose(data1.bounds[2] - data1.bounds[0], data2.bounds[2] - data2.bounds[0])\n assert np.isclose(data1.bounds[3] - data1.bounds[1], data2.bounds[3] - data2.bounds[1])\n\n\ndef test_rotate_origin(runner):\n res = runner.invoke(\n cli, \"random -n 100 -a 10cm 10cm dbsample rotate -o 0 0 90 dbsample dbdump\"\n )\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[1].bounds_within(-10 * CM, 0, 10 * CM, 10 * CM)\n\n\ndef test_translate(runner):\n res = runner.invoke(\n cli, \"random -n 100 -a 10cm 10cm dbsample translate 5cm 5cm dbsample dbdump\"\n )\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(0, 0, 10 * CM, 10 * CM)\n assert data[1].bounds_within(5 * CM, 5 * CM, 10 * CM, 10 * CM)\n\n\ndef test_scale_center(runner):\n res = runner.invoke(cli, \"random -n 100 -a 10cm 10cm dbsample scale 2 2 dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(0, 0, 10 * CM, 10 * CM)\n assert data[1].bounds_within(-5 * CM, -5 * CM, 20 * CM, 20 * CM)\n\n\ndef test_scale_origin(runner):\n res = runner.invoke(\n cli, \"random -n 100 -a 10cm 10cm dbsample scale -o 0 0 2 2 dbsample dbdump\"\n )\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(0, 0, 10 * CM, 10 * CM)\n assert data[1].bounds_within(0, 0, 20 * CM, 20 * CM)\n\n\ndef test_scaleto(runner):\n res = runner.invoke(\n cli, \"rect 0 0 10cm 5cm dbsample scaleto -o 0 0 20cm 20cm dbsample dbdump\"\n )\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(0, 0, 10 * CM, 5 * CM)\n assert data[1].bounds_within(0, 0, 20 * CM, 10 * CM)\n\n\ndef test_scaleto_fit(runner):\n res = runner.invoke(\n cli,\n \"rect 0 0 10cm 5cm dbsample scaleto --fit-dimensions -o 0 0 20cm 20cm dbsample dbdump\",\n )\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(0, 0, 10 * CM, 5 * CM)\n assert data[1].bounds_within(0, 0, 20 * CM, 20 * CM)\n assert not data[1].bounds_within(0, 0, 20 * CM, 10 * CM)\n\n\ndef test_crop_cm(runner):\n res = runner.invoke(cli, \"random -n 100 -a 10cm 10cm crop 2cm 2cm 8cm 8cm dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(2 * CM, 2 * CM, 8 * CM, 8 * CM)\n assert data[0].count <= 100\n\n\ndef test_crop(runner):\n res = runner.invoke(cli, \"random -n 100 -a 10 10 crop 2 2 6 6 dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(2, 2, 6, 6)\n assert data[0].count <= 100\n\n\ndef test_crop_line_flush(runner):\n # a line whose end 
intersect with crop bounds is not kept\n # a line flush with crop bounds is kept\n res = runner.invoke(\n cli, \"line 100 0 100 10 line 0 5 100 5 crop 100 0 200 200 dbsample dbdump\"\n )\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].count == 1\n\n\ndef test_crop_empty(runner):\n res = runner.invoke(cli, \"random -a 10cm 10cm -n 1000 crop 5cm 5cm 0 1cm dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].count == 0\n\n\ndef test_crop_empty2(runner):\n res = runner.invoke(cli, \"random -a 10cm 10cm -n 1000 crop 5cm 5cm 0 0 dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].count == 0\n\n\ndef test_trim(runner):\n res = runner.invoke(cli, \"random -a 10cm 10cm -n 1000 trim 1cm 2cm dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].bounds_within(CM, 2 * CM, 9 * CM, 8 * CM)\n\n\ndef test_trim_large_margins(runner):\n res = runner.invoke(cli, \"random -a 10cm 10cm -n 1000 trim 10cm 2cm dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].count == 0\n\n\ndef test_trim_large_margins2(runner):\n res = runner.invoke(cli, \"random -a 10cm 10cm -n 1000 trim 10cm 20cm dbsample dbdump\")\n data = DebugData.load(res.output)\n assert res.exit_code == 0\n assert data[0].count == 0\n\n\n@pytest.mark.parametrize(\n (\"linemerge_args\", \"expected\"),\n [\n (\"--no-flip --tolerance 0.05\", 3),\n (\"--no-flip --tolerance 0.25\", 2),\n (\"--tolerance 0.05\", 3),\n (\"--tolerance 0.15\", 2),\n (\"--tolerance 0.25\", 1),\n ],\n)\ndef test_linemerge(runner, linemerge_args, expected):\n res = runner.invoke(\n cli,\n \"line 0 0 0 10 line 0 10.2 0 20 line 30 30 0 20.1 \"\n f\"linemerge {linemerge_args} dbsample dbdump\",\n )\n data = DebugData.load(res.output)[0]\n assert res.exit_code == 0\n assert data.count == expected\n\n\n@pytest.mark.parametrize(\n \"lines\",\n [\n \" \".join(s)\n for s in itertools.permutations(\n [\"line 0 0 0 10\", \"line 0 10 10 10\", \"line 0 0 10 0\", \"line 10 0 10 10\"]\n )\n ],\n)\ndef test_linesort(runner, lines):\n res = runner.invoke(cli, f\"{lines} linesort dbsample dbdump\")\n data = DebugData.load(res.output)[0]\n assert res.exit_code == 0\n assert data.pen_up_length == 0\n\n\n@pytest.mark.parametrize(\n [\"opt\", \"expected\"],\n {\n (\"--no-flip\", 50.0),\n (\"\", 20.0),\n (\"--two-opt\", 0.0),\n },\n)\ndef test_linesort_result(runner, opt, expected):\n res = runner.invoke(\n cli,\n \"line 20 0 30 0 line 10 0 20 0 line 30 0 40 0 line 0 0 10 0 \"\n f\"linesort {opt} dbsample dbdump\",\n )\n\n # test situation: four co-linear, single-segment lines in shuffled order\n #\n # |\n # 0 | +--4--> +--2--> +--1--> +--3-->\n # L_____________________________________\n # 0 10 20 30 40\n\n # the following situation\n\n data = DebugData.load(res.output)[0]\n assert res.exit_code == 0\n assert data.pen_up_length == pytest.approx(expected)\n\n\ndef test_linesort_reject_bad_opt(runner):\n res = runner.invoke(\n cli,\n \"line 0 0 0 10 line 0 10 10 10 line 0 0 10 0 line 10 0 10 10 \"\n f\"linesort --no-flip dbsample dbdump\",\n )\n\n # in this situation, the greedy optimizer is worse than the starting position, so its\n # result should be discarded\n\n data = DebugData.load(res.output)[0]\n assert res.exit_code == 0\n assert data.pen_up_length == pytest.approx(14.1, abs=0.1)\n\n\ndef test_linesort_two_opt_debug_output(runner, caplog):\n res = runner.invoke(cli, \"-vv -s 0 
random -n 100 linesort --two-opt\")\n\n    assert res.exit_code == 0\n    assert \"% done with pass\" in caplog.text\n\n\ndef test_snap():\n    line = np.array([0.2, 0.8 + 1.1j, 0.5 + 2.5j])\n    lc = execute_single_line(\"snap 1\", line)\n\n    assert len(lc) == 1\n    assert np.all(lc[0] == np.array([0, 1 + 1j, 2j]))\n\n\ndef test_filter():\n    assert len(execute_single_line(\"filter --min-length 10\", [0, 15])) == 1\n    assert len(execute_single_line(\"filter --min-length 10\", [0, 10])) == 1\n    assert len(execute_single_line(\"filter --min-length 10\", [0, 5])) == 0\n    assert len(execute_single_line(\"filter --max-length 10\", [0, 15])) == 0\n    assert len(execute_single_line(\"filter --max-length 10\", [0, 10])) == 1\n    assert len(execute_single_line(\"filter --max-length 10\", [0, 5])) == 1\n    assert len(execute_single_line(\"filter --closed\", [0, 5, 5j, 0])) == 1\n    assert len(execute_single_line(\"filter --closed\", [0, 5, 5j])) == 0\n    assert len(execute_single_line(\"filter --not-closed\", [0, 5, 5j, 0])) == 0\n    assert len(execute_single_line(\"filter --not-closed\", [0, 5, 5j])) == 1\n\n\n@pytest.mark.parametrize(\"pitch\", [0.1, 1, 5, 10, 20, 50, 100, 200, 500])\ndef test_snap_no_duplicate(pitch: float):\n    \"\"\"Snap should return no duplicated points and reject lines that degenerate into a single\n    point.\"\"\"\n    lc = execute_single_line(f\"snap {pitch}\", vp.circle(0, 0, 100, quantization=1))\n\n    if len(lc) == 1:\n        assert len(lc[0]) > 1\n        assert np.all(lc[0][:-1] != lc[0][1:])\n    else:\n        assert len(lc) == 0\n\n\n@pytest.mark.parametrize(\n    (\"line\", \"expected\"),\n    [\n        ([0, 1 + 2j, 2], [[0, 1 + 2j], [1 + 2j, 2]]),\n        ([0, 1 + 2j, 1 + 2j, 2], [[0, 1 + 2j], [1 + 2j, 2]]),\n    ],\n)\ndef test_splitall_filter_duplicates(line, expected):\n    lc = execute_single_line(\"splitall\", line)\n\n    # np.all() over a generator expression is always truthy; compare each line explicitly\n    assert all(np.all(line == expected_line) for line, expected_line in zip(lc, expected))\n\n\n@pytest.mark.parametrize(\n    (\"args\", \"expected_bounds\"),\n    [\n        (\"10x10cm\", (4.5, 4.5, 5.5, 5.5)),\n        (\"-h left -v top a4\", (0, 0, 1, 1)),\n        (\"-m 3cm -h left -v top 10x20cm\", (3, 3, 7, 7)),\n        (\"-m 3cm -v bottom 10x20cm\", (3, 13, 7, 17)),\n        (\"-m 3cm -h right 20x10cm\", (3, 8, 7, 12)),\n        (\"-m 3cm -h right 10x20cm\", (3, 8, 7, 12)),\n        (\"-m 3cm -h right -l 20x10cm\", (13, 3, 17, 7)),\n        (\"-m 3cm -h right -l 10x20cm\", (13, 3, 17, 7)),\n    ],\n)\ndef test_layout(runner, args, expected_bounds):\n    document = vp.Document()\n\n    @cli.command()\n    @vp.global_processor\n    def sample(doc: vp.Document):\n        nonlocal document\n        document = doc\n\n    res = runner.invoke(cli, f\"random -n 100 rect 0 0 1cm 1cm layout {args} sample\")\n    assert res.exit_code == 0\n    bounds = document.bounds()\n    assert bounds is not None\n    for act, exp in zip(bounds, expected_bounds):\n        assert act == pytest.approx(exp * CM)\n\n\n@pytest.mark.parametrize(\"font_name\", vp.FONT_NAMES)\n@pytest.mark.parametrize(\"options\", [\"\", \"-j\"])\ndef test_text_command_wrap(font_name, options):\n    doc = execute(f\"text -f {font_name} -w 350 {options} '{LOREM}'\")\n\n    bounds = doc[1].bounds()\n    assert bounds is not None\n    assert -2.0 <= bounds[0] <= 3.0\n    if options == \"-j\":\n        assert bounds[2] == pytest.approx(350.0)\n    else:\n        assert bounds[2] <= 350.0\n\n\ndef test_text_command_empty():\n    doc = execute(\"text ''\")\n    assert doc.is_empty()\n","sub_path":"tests/test_commands.py","file_name":"test_commands.py","file_ext":"py","file_size_in_byte":16717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"27688509","text":"#!/usr/bin/env 
python\n'''\nThis application is my submission for Project 2 of the Udacity Fullstack Development\nNanoDegree program. It will run up a server and website that will provide the user\nwith a list of books within a variety of different categories\n'''\n# Imports\nfrom flask import Flask, render_template, request, redirect, jsonify, url_for, flash\nfrom sqlalchemy import create_engine, asc\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Category, Book, User\nfrom flask import session as login_session\nimport random\nimport string\nfrom oauth2client.client import flow_from_clientsecrets\nfrom oauth2client.client import FlowExchangeError\nimport httplib2\nimport json\nfrom flask import make_response\nimport requests\n\napp = Flask(__name__)\n\n# Create a database session and connect to it\nengine = create_engine('sqlite:///catalog.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\n\n\n# Function for creating a database session\ndef connect():\n    session = DBSession()\n    return session\n\n\n# Functions for checking if a user exists in the database, or to create one if it does not\ndef createUser(login_session):\n    try:\n        session = connect()\n        newUser = User(name=login_session['username'], email=login_session['email'], picture=login_session['picture'])\n        session.add(newUser)\n        session.commit()\n        user = session.query(User).filter_by(email=login_session['email']).one()\n        return user.id\n    finally:\n        session.close()\n\n\ndef getUserInfo(user_id):\n    try:\n        session = connect()\n        user = session.query(User).filter_by(id=user_id).one()\n        return user\n    finally:\n        session.close()\n\n\ndef getUserId(email):\n    try:\n        session = connect()\n        user = session.query(User).filter_by(email=email).one()\n        return user.id\n    except:\n        return None\n\n\n# JSON API Endpoints for viewing book information\n@app.route('/category/JSON')\ndef allBooksJSON():\n    try:\n        session = connect()\n        allBooks = session.query(Book).all()\n        return jsonify(AllBooks=[i.serialize for i in allBooks])\n    finally:\n        session.close()\n\n\n@app.route('/category/<int:category_id>/JSON')\ndef categoryBooksJSON(category_id):\n    try:\n        session = connect()\n        books = session.query(Book).filter_by(category_id=category_id).all()\n        return jsonify(Books=[i.serialize for i in books])\n    finally:\n        session.close()\n\n\n@app.route('/book/<int:book_id>/JSON')\ndef bookJSON(book_id):\n    try:\n        session = connect()\n        book = session.query(Book).filter_by(id=book_id).one()\n        return jsonify(Book=[book.serialize])\n    finally:\n        session.close()\n\n\n# Create a new category\n@app.route('/category/new/', methods=['GET', 'POST'])\ndef newCategory():\n    if 'username' not in login_session:\n        return redirect('/login')\n    if request.method == 'POST':\n        newCategory = Category(name=request.form['name'], user_id=login_session['user_id'])\n        try:\n            session = connect()\n            session.add(newCategory)\n            flash('New Category %s Successfully Created' % newCategory.name)\n            session.commit()\n            return redirect(url_for('showMainPage'))\n        finally:\n            session.close()\n    else:\n        return render_template('newCategory.html')\n\n\n# Create a new Book\n@app.route('/book/new/<int:category_id>/', methods=['GET', 'POST'])\ndef newBook(category_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    try:\n        session = connect()\n        category = session.query(Category).filter_by(id=category_id).one()\n        if request.method == 'POST':\n            newBook = Book(name=request.form['name'], category_id=category.id, description=request.form['description'], user_id=category.user_id)\n            try:\n                session.add(newBook)\n                session.commit()\n                
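# Redirect after a successful POST so a browser refresh cannot resubmit the form.\n                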
return redirect(url_for('showMainPage'))\n            finally:\n                session.close()\n        else:\n            return render_template('newBook.html', category_id=category_id)\n    finally:\n        session.close()\n\n\n# Route for editing books\n@app.route('/category/<int:category_id>/book/<int:book_id>/edit/', methods=['GET', 'POST'])\ndef editBook(book_id, category_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    try:\n        session = connect()\n        bookToEdit = session.query(Book).filter_by(id=book_id).one()\n        if login_session['user_id'] != bookToEdit.user_id:\n            flash(\"You do not have authorization to edit this book!\")\n            return render_template('bookDescription.html', book=bookToEdit)\n        if request.method == 'POST':\n            if request.form['name']:\n                bookToEdit.name = request.form['name']\n            if request.form['description']:\n                bookToEdit.description = request.form['description']\n            session.add(bookToEdit)\n            session.commit()\n            return render_template('bookDescription.html', book=bookToEdit)\n        else:\n            return render_template('editBook.html', book=bookToEdit)\n    finally:\n        session.close()\n\n\n# Route for deleting a book\n@app.route('/category/<int:category_id>/book/<int:book_id>/delete', methods=['GET', 'POST'])\ndef deleteBook(category_id, book_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    try:\n        session = connect()\n        bookToDelete = session.query(Book).filter_by(id=book_id).one()\n        if login_session['user_id'] != bookToDelete.user_id:\n            flash(\"You do not have authorization to delete this book!\")\n            return render_template('bookDescription.html', book=bookToDelete)\n        if request.method == 'POST':\n            session.delete(bookToDelete)\n            session.commit()\n            return redirect(url_for('showMainPage'))\n        else:\n            return render_template('deleteBook.html', book=bookToDelete)\n    finally:\n        session.close()\n\n\n# Route for editing an existing category\n@app.route('/category/<int:category_id>/edit', methods=['GET', 'POST'])\ndef editCategory(category_id):\n    if 'username' not in login_session:\n        return redirect('/login')\n    try:\n        session = connect()\n        categoryToEdit = session.query(Category).filter_by(id=category_id).one()\n        books = session.query(Book).filter_by(category_id=categoryToEdit.id).all()\n        if login_session['user_id'] != categoryToEdit.user_id:\n            flash(\"You do not have authorization to edit this category!\")\n            return render_template('displayCatBooks.html', category=categoryToEdit, books=books)\n        if request.method == 'POST':\n            if request.form['name']:\n                categoryToEdit.name = request.form['name']\n            session.add(categoryToEdit)\n            session.commit()\n            return redirect(url_for('showMainPage'))\n        else:\n            return render_template('editCategory.html', category=categoryToEdit)\n    finally:\n        session.close()\n\n\n# Route for Logging in\n@app.route('/login/')\ndef login():\n    state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))\n    login_session['state'] = state\n    return render_template('login.html', STATE=state)\n\n\n@app.route('/')\ndef showMainPage():\n    try:\n        session = connect()\n        categories = session.query(Category).all()\n        books = session.query(Book).all()\n        if 'username' not in login_session:\n            return render_template(\"publicMainPage.html\", categories=categories, books=books)\n        else:\n            return render_template(\"mainPage.html\", categories=categories, books=books)\n    finally:\n        session.close()\n\n\n@app.route('/category/books/<int:category_id>/')\ndef showCategoryBooks(category_id):\n    try:\n        session = connect()\n        category = session.query(Category).filter_by(id=category_id).one()\n        books = session.query(Book).filter_by(category_id=category.id).all()\n        if 'username' not in login_session:\n            
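# Anonymous visitors get the read-only template; logged-in users see edit controls.\n            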
return render_template(\"publicDisplayCatBooks.html\", category=category, books=books)\n        else:\n            return render_template(\"displayCatBooks.html\", category=category, books=books)\n    finally:\n        session.close()\n\n\n@app.route('/books/<int:book_id>/')\ndef showBookDescription(book_id):\n    try:\n        session = connect()\n        book = session.query(Book).filter_by(id=book_id).one()\n        session.close()\n        if 'username' not in login_session:\n            return render_template(\"publicBookDescription.html\", book=book)\n        else:\n            return render_template('bookDescription.html', book=book)\n    finally:\n        session.close()\n\n\n@app.route('/fbconnect', methods=['POST'])\ndef fbconnect():\n    # Verify the value of state, to guard against cross-site request forgery attacks\n    if request.args.get('state') != login_session['state']:\n        response = make_response(json.dumps('Invalid State Parameter.'), 401)\n        response.headers['Content-Type'] = 'application/json'\n        return response\n    access_token = request.data\n\n    # Exchange the client token for long-lived server-side token\n    app_id = json.loads(open('fb_client_secrets.json', 'r').read())['web']['app_id']\n    app_secret = json.loads(open('fb_client_secrets.json', 'r').read())['web']['app_secret']\n    url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (app_id, app_secret, access_token)\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[1]\n\n    # Use token to get user info from API\n    # userinfo_url = \"https://graph.facebook.com/v2.8/me\" //Unsure of the purpose of this code, which was included in the demo code\n    '''\n    Split the result from the server exchange first by commas, and select the first index\n    which will give us the key:value for the server access token then we will split it on colons to pull\n    out the actual token value and format it to be used directly in the graph API calls\n    '''\n    token = result.split(',')[0].split(':')[1].replace('\"', '')\n\n    url = 'https://graph.facebook.com/v2.8/me?access_token=%s&fields=name,id,email' % token\n\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[1]\n    data = json.loads(result)\n    login_session['provider'] = 'facebook'\n    login_session['username'] = data[\"name\"]\n    login_session['email'] = data[\"email\"]\n    login_session['facebook_id'] = data[\"id\"]\n\n    # Store the token in the login_session so that we can logout when requested\n    login_session['access_token'] = token\n\n    # Get user's picture\n    url = 'https://graph.facebook.com/v2.8/me/picture?access_token=%s&redirect=0&height=200&width=200' % token\n    h = httplib2.Http()\n    result = h.request(url, 'GET')[1]\n    data = json.loads(result)\n    login_session['picture'] = data[\"data\"][\"url\"]\n\n    # See if user currently exists in database, and add them if they do not\n    user_id = getUserId(login_session['email'])\n    if not user_id:\n        user_id = createUser(login_session)\n    login_session['user_id'] = user_id\n\n    
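# Build the HTML welcome fragment that the login page displays on success.\n    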
output = ''\n    output += '<h1>Welcome, '\n    output += login_session['username']\n    output += '!</h1>
'\n output += '' % sys.argv[0])\n sys.exit(1)\n\nactions = {'int' : handle_int,\n 'float' : handle_float,\n 'str' : handle_string,\n}\n\nwith open(sys.argv[1], 'r') as srcfile:\n for line in srcfile:\n line = line.strip()\n line = re.sub(' +', ' ', line)\n if not line or line[0] == '#':\n continue\n parts = line.split()\n if len(parts) >= 3:\n result = actions[parts[0].split('=')[1]](parts[1], parts[2], parts[0].split('=')[0])\n print(result)\n","sub_path":"09_DSLs/dsl_calc.py","file_name":"dsl_calc.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"540472725","text":"import os\nimport sys\ntry:\n sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\nexcept:\n pass\nimport cv2 \nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\nfrom models.StereoNet8Xmulti import StereoNet\nimport matplotlib.pyplot as plt\ntorch.backends.cudnn.benchmark = True\n\ndir_kitti='/home/liu/DP_DATA/STEREO/KITTI/testing/image_2'\n\n\npaths=[]\nfor root, dirs, files in os.walk(dir_kitti):\n for file in files:\n paths.append(os.path.join(root,file))\n\nnet = StereoNet(3,3,192)\n#net=net.cuda()\nnet = torch.nn.DataParallel(net).cuda()\n\ncheckpoint = torch.load('/home/liu/workspace/StereoNet-ActiveStereoNet/results/8Xmulti/checkpoint.pth')\nnet.load_state_dict(checkpoint['state_dict'])\n\n\n\nmean = torch.tensor([0., 0., 0.], dtype=torch.float32)\nstd = torch.tensor([1., 1., 1.], dtype=torch.float32)\n\ntotensor = transforms.ToTensor()\nnormalize = transforms.Normalize(mean.tolist(), std.tolist())\nunnormalize = transforms.Normalize((-mean / std).tolist(), (1.0 / std).tolist())\n\n\nfig, ax = plt.subplots(2, 2, figsize=(16, 8))\nimport time\n\nfor path_left in paths:\n path_right = path_left.replace('image_2', 'image_3')\n imageL = cv2.imread(path_left)\n imageR = cv2.imread(path_right)\n\n imageL = cv2.resize(imageL, dsize=None, fx=1.5, fy=1.5)\n imageR = cv2.resize(imageR, dsize=None, fx=1.5, fy=1.5)\n\n #new_size = (w, h)\n #imageL = cv2.resize(imageL, new_size, interpolation=cv2.INTER_NEAREST)\n #imageR = cv2.resize(imageR, new_size, interpolation=cv2.INTER_NEAREST)\n imageL = normalize(totensor(imageL))[None,:].cuda()\n imageR = normalize(totensor(imageR))[None,:].cuda()\n start = time.time()\n with torch.no_grad():\n result = net(imageL, imageR)\n end = time.time()\n print(end - start)\n if(True):\n imL_ = unnormalize(imageL[0]).permute(1,2,0).cpu().detach().numpy()\n imR_ = unnormalize(imageR[0]).permute(1,2,0).cpu().detach().numpy()\n disp_NET_ = result[3].cpu().detach().numpy()[0]\n \n #plt.subplot(2,2,1)\n ax[0,0].imshow( imL_[...,::-1])\n #plt.subplot(2,2,2)\n ax[0,1].imshow(imR_[...,::-1])\n #plt.subplot(2,2,4)\n ax[1,0].imshow(disp_NET_*2,cmap='rainbow',vmin=0, vmax=192)\n #plt.colorbar()\n plt.pause(1)\n \n\n \n","sub_path":"test_kitti.py","file_name":"test_kitti.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"450944140","text":"# coding: utf-8\n\nimport os\nimport sys\nimport jieba\nimport jpype\n\nfrom logging import config as logconfig\n\n\nAPP_DIR = os.path.dirname(__file__)\nROOT_DIR = os.path.dirname(APP_DIR)\nRESOURCE_DIR = os.path.join(ROOT_DIR, 'zresource')\nTMP_DIR = os.path.join(RESOURCE_DIR, 'tmp')\n\nlogconfig.fileConfig(os.path.join(ROOT_DIR, 'logging.ini'))\n\n\nAPP_RESOURCE_DIR = os.path.join(RESOURCE_DIR, 
os.path.basename(APP_DIR))\n\n'''lexicon'''\n# 无关词\nLEXICON_IRRELEVANT_WORDS_DIR = os.path.join(APP_RESOURCE_DIR, 'lexicon', 'irrelevant')\n# 情感固定的通用情感词\nLEXICON_FIXED_SENTIMENT_WORDS_FILE = os.path.join(APP_RESOURCE_DIR, 'lexicon', 'fixed_sentiment', '情感词汇本体.xlsx')\n# 程度词词库\nLEXICON_DEGREE_WORDS_FILE = os.path.join(APP_RESOURCE_DIR, 'lexicon', 'degree', '程度词.txt')\n\n\n'''syntax parser'''\nDEFAULT_PARSER = 'comb'\n\n\n'''LTP Model'''\nLTP_MODEL_DIR = os.path.join(APP_RESOURCE_DIR, 'model', 'ltp')\nCUSTOM_TOKEN_FILE = os.path.join(APP_RESOURCE_DIR, 'lexicon', 'ltp', 'custom.token.txt')\nCUSTOM_POS_FILE = os.path.join(APP_RESOURCE_DIR, 'lexicon', 'ltp', 'custom.pos.txt')\n\n\n'''HanLP Model'''\nHANLP_MODEL_DIR = os.path.join(APP_RESOURCE_DIR, 'model', 'hanlp')\n\njars_hanlp = [HANLP_MODEL_DIR, os.path.join(HANLP_MODEL_DIR, 'hanlp-1.3.4.jar')]\n\nseparator = ';' if sys.platform.startswith('win') else ':'\nclasspath = separator.join(jars_hanlp)\nclasspath_option = '-Djava.class.path=' + classpath\n\n# -Dfile.encoding=UTF8\nprint(jpype.isJVMStarted())\nif not jpype.isJVMStarted():\n jpype.startJVM(jpype.getDefaultJVMPath(), classpath_option, '-Xrs', '-Xmx2048m')\n\n\ndef add_user_words(words):\n for word, freq, tag in words:\n jieba.add_word(word, freq=freq, tag=tag)\n\n\n# 更改jieba默认字典\n# jieba.set_dictionary(os.path.join(RESOURCE_DIR, 'nlp', 'lexicon', 'jieba', 'dict.big.txt'))\n","sub_path":"nlp/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"654487880","text":"import numpy as np\nimport matplotlib.pyplot as plt\n#import numdifftools as nd\nimport scipy as scipy\nfrom scipy.linalg import solve\nfrom scipy.sparse.linalg import spsolve\nimport warnings\n\nclass relaxation_method:\n def __init__(self,xmin,xmax,M,N,B1=[],B2=[],y_guess=[],iteration=100,start=False,display=False,**kwargs):\n #boundary conditions N=n1+n2\n self.N = N\n self.M = M\n self.n1 = len(B1)\n self.n2 = len(B2)\n self.B1 = B1\n self.B2 = B2\n \n self.iteration = iteration\n self.display = display\n self.k = 0\n self.slowc = 1.0\n self.successful = True\n self.eps = 1.0e-10\n self.tol = 1.0e-15\n\n #mesh grid\n self.x = np.linspace(xmin,xmax,M)\n # initial guess of y[i,j], stored in 2D array\n self.y = np.ones((N,M)) \n self.set_initial_guess = False\n if(len(y_guess)!=0):\n if(y_guess.shape!=self.y.shape):\n exit(\"shape of y does not match N x M shape\")\n else: \n self.set_initial_guess = True\n self.y = y_guess\n #error\n self.err = []\n self.iterations = []\n\n self.init(**kwargs)\n if(start): self.x,self.y = self.start()\n\n self.dic = dict(err=self.err,iterations=self.iterations,successful=self.successful)\n\n\n def init(self,*args,**kwargs):\n pass\n\n def f(self,t,y,i=None):\n \"\"\"\n #x = t\n #y[0] = x'\n #y[1] = x\n #y[2] = y\n #f = [2-x, y*x, z]\n x = y.item(0)\n u = y.item(1)\n z = y.item(2)\n f = [z,2.0-x,u*x]\n if(i is None): return np.array(f)\n else: return f[i]\n \"\"\"\n return 0\n\n def jac(self,t,y,i=None,j=None):\n \"\"\"\n #x = t\n #y[0] = x\n #y[1] = y\n #y[2] = z = x'\n x = y.item(0)\n u = y.item(1)\n z = y.item(2) \n jac = np.zeros((self.N,self.N),dtype=np.float64)\n\n jac[0][0] = 0\n jac[0][1] = 0\n jac[0][2] = 1\n\n jac[1][0] = -1.0\n jac[1][1] = 0\n jac[1][2] = 0\n\n jac[2][0] = u\n jac[2][1] = x\n jac[2][2] = 0\n \n if(i is None):\n return jac\n else: return jac.item(i,j)\n \"\"\"\n #return nd.Jacobian(lambda y,x: 
self.f(x,y),order=2)((y[...,k]+y[...,k-1])/2.,(x[k]+x[k-1])/2.)\n eps = self.eps\n N = len(y)\n jac = np.zeros([N,N], dtype = np.float64) \n for i in range(N): \n y1 = y.copy() \n y2 = y.copy() \n y1[i] += eps \n y2[i] -= eps \n f1 = self.f(t, y1) \n f2 = self.f(t, y2) \n jac[ : , i] = (f1 - f2) / (2 * eps) \n return jac\n\n def dB1dy(self):\n #shape n1 x N\n dB1dy = np.zeros((self.n1,self.N))\n \"\"\"\n dB1dy[0][0] = 1.0\n dB1dy[1][1] = 1.0\n \"\"\"\n return dB1dy\n\n def dB2dy(self):\n #shape n2 x N\n dB2dy = np.zeros((self.n2,self.N))\n \"\"\"\n dB2dy[0][0] = 1.0\n \"\"\"\n return dB2dy\n\n def scale(self,k): \n return 0.25\n\n def delta(self,i,j):\n if(i==j):\n return 1.0\n else: \n return 0.0\n\n def CalcError(self,deltaylist): \n errSum=0\n for ip in range(self.M):\n for ie in range(self.N): \n errSum = errSum + abs(deltaylist[ip*self.N+ie])/self.scale(ie)\n return errSum/(self.M*self.N)\n\n def CalcRes(self):\n n1 = self.n1\n n2 = self.n2\n N = self.N\n M = self.M\n x = self.x\n y = self.y\n f = self.f\n B1 = self.B1\n B2 = self.B2\n \n reslist=[]\n # first ne residuals at k=0\n for i in range(n1): \n E = y[i][0] - self.B1[i]\n if(self.set_initial_guess):\n reslist.append(0)\n else: \n reslist.append(E)\n # for each k=1,..,M-1, there will be 2ne residuals\n for k in range(1,M):\n E = y[...,k] - y[...,k-1] - (x[k]-x[k-1])*(self.f(.5*(x[k]+x[k-1]),.5*(y[...,k]+y[...,k-1])))\n for j in range(N):\n reslist.append(E.item(j))\n # last ne residuals at k=M\n for i in range(n2): \n E = y[i][-1] - self.B2[i]\n if(self.set_initial_guess):\n reslist.append(0)\n else: reslist.append(E)\n \n return np.array(reslist,dtype=np.float64)\n\n def UpdateY(self,deltayList):\n for k in range(self.M): \n for j in range(self.N): self.y[j,k]=self.y[j,k]+deltayList[k*self.N+j]\n\n def CalcS(self):\n n1 = self.n1\n n2 = self.n2\n N = self.N\n M = self.M\n x = self.x\n y = self.y\n s = np.zeros((M*N, M*N),dtype=np.float64)\n ns = M*N\n #at initial boundary\n for i in range(n1):\n for j in range(N):\n s[i][j] = self.dB1dy()[i][j]\n #at outer boundary \n for i in range(n2):\n for j in range(N):\n s[ns-n2+i][(M-1)*N+j] = self.dB2dy()[i][j]\n #interior points \n for k in range(1,M):\n dx = x[k] - x[k-1]\n r0 = n1 + N*(k-1)\n c0 = N*(k-1)\n self.k = k\n dgdy = self.jac((x[k]+x[k-1])/2.,(y[...,k]+y[...,k-1])/2.)\n for i in range(N):\n for j in range(N):\n s[r0+i][c0+j] = -self.delta(i,j) - 0.5*dx*dgdy.item(i,j)\n r1 = r0\n c1 = c0 + N\n for i in range(N):\n for j in range(N):\n s[r1+i][c1+j] = self.delta(i,j) - 0.5*dx*dgdy.item(i,j)\n return np.array(s)\n\n def start(self):\n errList = []\n itList = []\n for it in range(self.iteration):\n resList=self.CalcRes()\n s = self.CalcS()\n with warnings.catch_warnings(): \n warnings.simplefilter(\"ignore\")\n deltayList = spsolve(s, -resList)\n err = self.CalcError(deltayList)\n if(self.display): print(it,err)\n errList.append(err)\n itList.append(it)\n stopIteration = err < self.tol#pow(10,-15)\n if not stopIteration: deltayList = self.slowc/max([self.slowc,err])*deltayList\n self.UpdateY(deltayList)\n if stopIteration: break\n if it == self.iteration-1:\n self.successful = False\n break \n self.err = errList\n self.iterations = itList\n return self.x, self.y\n\n","sub_path":"relaxation_method.py","file_name":"relaxation_method.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"538371004","text":"in_file_path = '../giga/samples_giga.txt'\nout_file_path = 
'train.txt'\n\nout_file = open(out_file_path, 'w', encoding='utf-8')\nn_samples = 10\n\n\ndef read_raml_sample_file():\n raml_file = open(in_file_path, 'r', encoding='utf-8')\n\n train_data = []\n sample_num = -1\n for line in raml_file.readlines():\n line = line[:-1]\n if line.startswith('***'):\n continue\n elif line.endswith('samples'):\n sample_num = eval(line.split()[0])\n assert sample_num == 1 or sample_num == n_samples\n elif line.startswith('source:'):\n train_data.append({'source': line[7:], 'targets': []})\n else:\n train_data[-1]['targets'].append(line.split('|||'))\n if sample_num == 1:\n for i in range(n_samples - 1):\n train_data[-1]['targets'].append(line.split('|||'))\n return train_data\n\n\ndata = read_raml_sample_file()\nfor d in data:\n source = d['source'].strip()\n for a in d['targets']:\n target = a[0].strip()\n score = a[1]\n out_file.write(source + '\\n' + target + '\\n' + str(score).strip() + '\\n')\n","sub_path":"data/giga_raml/format_train.py","file_name":"format_train.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"452749362","text":"from fuzzy_model.Fuzzy import rank\nfrom fuzzy_model.reader import Reader\nfrom fuzzy_model.preprocess import allPreprocess\nfrom fuzzy_model.lexer_query import Lexer\nfrom fuzzy_model.parse_query import Parser\nimport fuzzy_model.feedback as feedback\nfrom fuzzy_model.measures import accuracy,relay, f,f1\nfrom random import choices\nimport json\n#print(allPreprocess(\"Ant-Man\"))\nr = Reader()\ndirectory = \"./corpus/medicina\"\nr.readDirectory(directory)\nprint(\"documents are ready\")\nprint(len(r.vocab))\nl = Lexer()\np = Parser(l)\nprint(\"parser pass\")\n\nwith open(\"./queries.txt\") as fd:\n info = json.load(fd)\npres = []\nrecob = []\n\nfor i in range(len(info)):\n\n search = info[i]\n query = search[\"Text\"]\n relevant_docs = search[\"RelevantDocuments\"]\n print(query)\n query = p.parse(query)\n #print(\"Query #{}: {}\".format(len(pres),query))\n #print(\"ranking {} documents\".format(len(r.documents)))\n most_similar = rank(query,r.documents,directory)\n #print(most_similar)\n most_similar = [i[1] for i in filter(lambda doc: doc[0]>0.8,most_similar)]\n most_similar = [i[9:] for i in most_similar]\n #print(most_similar)\n print(\"finded {} results\".format(len(most_similar)))\n rr = list(filter(lambda x: x in relevant_docs,most_similar))\n ri = list(filter(lambda x: not x in relevant_docs,most_similar))\n nr = list(filter(lambda x: not x in most_similar,relevant_docs))\n a = accuracy(rr,ri)\n rel = relay(rr,nr)\n pres.append(a)\n recob.append(rel)\n f_1 = f1(a,rel)\n f_2 = f(a,rel,2)\n f_05 = f(a,rel,0.5)\n print(\"P: {}, R:{}, F(1):{}, F(2):{}, F(0.5): {}\".format(a,rel,f_1,f_2,f_05))\n \n\nprint(pres)\nprint(recob)\n\n\n","sub_path":"FlaskApp/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"457447554","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 2 22:20:46 2019\n\n@author: shintaku\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 21 17:01:17 2018\n\n@author: lab\n\"\"\"\n\n# In[]\n#import time, datetime, os#, nidaqmx#, itertools\nimport time, datetime, os, serial#import matplotlib.pyplot as plt\n#from matplotlib import animation\n#import numpy as np\n\n#ser = serial.Serial('COM3', 9600, timeout=1)\n\n# In[]\nclass AI(): \n def DefFile(): # Making Folder 
for saving this result\n global FolderName1\n global FileName1\n global VoltageFile\n \n FolderName1='C:/Users/Lab/Documents/python/git_electroporation'\n # FolderName1='C:/Users/lab.LABNOTE/Documents'\n \n FolderName1=FolderName1+\"/\"+str(datetime.datetime.today().strftime(\"%Y%m%d\"))\n os.makedirs(FolderName1,exist_ok=True)\n \n FileName=str(datetime.datetime.\n \n today().strftime(\"%Y%m%d_%H%M%S\"))+'_exp'\n FileName1=FolderName1+\"/\"+FileName+str(1+len([x for x in os.listdir(FolderName1) if x.endswith(\".csv\")])).zfill(4)\n return(FileName1)\n \n\n def ArduinoAI(x,y,c):\n# import serial\n #ser = serial.Serial('COM3',9600,timeout=5)\n #ser.flushInput()\n #ser.open()\n c = []\n #配列cをclear\n ser.write(b'AI1:4')\n #time.sleep(0.01)\n ser_bytes = ser.readline().decode(\"utf-8\")\n #decoded_bytes = ser_bytes[0:len(ser_bytes)].decode(\"utf-8\")\n #decoded_bytes = list(map(int,str.split(decoded_bytes.strip(\"\\r\\n\"),\",\")))\n decoded_bytes = ser_bytes.strip() # hiroyuki\n \n x.append(time.time())\n y.extend(decoded_bytes.split(\",\"))\n\n c.append(time.time())\n c.extend(decoded_bytes.split(\",\"))\n\n if len(c) == 2:\n c[1] = 0\n c.extend([0,0,0])\n \n \n for i in range(5): # range(X):Xはチャンネル数\n c[i] = float(c[i]) # listをfloat形式に変換\n \n #c[1] = round(c[1]*0.1266-114.74,4)\n #c[2] = round(c[2]*0.1266-114.74,4)\n #c[3] = round(-(c[3]-204.6)/818.4*99-1,4)\n \n #print('c',c)\n \n #ser.close()\n return(x,y,c)\n \n def ArduinoDO(flag):\n# import serial\n #ser = serial.Serial('COM3',9600,timeout=1)\n #ser.flushInput()\n if flag==True: \n ser.write(b'DO1H')\n else:\n ser.write(b'DO1L')\n #ser.close()\n def ArduinoDP(ch,pulsewidth,duty,number):\n command = str.encode(\"DP:\"+str(ch)+\":\"+str(int(pulsewidth))+\":\"+str(duty)+\":\"+str(number)+\"\\n\")\n ser.write(command)\n def ArduinoAO(flag,values):\n #ser = serial.Serial('COM3',9600,timeout=1)\n #ser.flushInput()\n print(values)\n if flag == True:\n #import serial\n ser.write(b'AO6v200\\n')\n ser.write(b'AO9v400\\n')\n ser.write(b'AO10v600\\n')\n ser.write(b'AO11v800\\n')\n else:\n ser.write(b'AO6v0\\n')\n ser.write(b'AO9v0\\n')\n ser.write(b'AO10v0\\n')\n ser.write(b'AO11v0\\n')\n #print('AO6v1000')\n #ser.close()\n def NIDAQAI(x,y):\n import nidaqmx\n with nidaqmx.Task() as task:\n task.ai_channels.add_ai_voltage_chan(\"Dev1/ai0:3\",\n terminal_config=nidaqmx.constants.TerminalConfiguration.RSE)\n x.append(time.time())\n y.extend(task.read(number_of_samples_per_channel=1))\n return(x,y)\n\n def NIDAQ_Stream(num_ch,num_smpl,rate):\n import numpy\n import nidaqmx\n from nidaqmx.stream_readers import (AnalogSingleChannelReader, AnalogMultiChannelReader)\n from nidaqmx.constants import (Edge, Slope)\n from nidaqmx._task_modules.triggering.start_trigger import StartTrigger\n \n #from nidaqmx.tests.fixtures import x_series_device\n # import nidaqmx.task as task\n data= numpy.zeros((num_ch,num_smpl), dtype=numpy.float64)\n with nidaqmx.Task() as read_task:\n read_task.ai_channels.add_ai_voltage_chan(\"Dev1/ai0:\" + str(num_ch-1),\n terminal_config=nidaqmx.constants.TerminalConfiguration.RSE)\n\n read_task.timing.cfg_samp_clk_timing(rate,samps_per_chan=num_smpl)\n #ead_task.triggers.start_trigger.cfg_dig_edge_start_trig(trigger_source=\"/Dev2/PFI0\")\n #read_task.timing.delay_from_samp_clk_delay=0\n #s=nidaqmx.stream_readers.AnalogMultiChannelReader()\n \n reader=AnalogMultiChannelReader(read_task.in_stream)\n reader.read_many_sample(data,timeout=num_smpl/rate+1)\n # print(data)\n return(data)\n def NIDAQ_Trigger():\n import numpy\n import 
nidaqmx\n from nidaqmx.stream_readers import (AnalogSingleChannelReader,\n AnalogMultiChannelReader)\n from nidaqmx.constants import (Edge, Slope)\n from nidaqmx._task_modules.triggering.start_trigger import StartTrigger\n \n #from nidaqmx.tests.fixtures import x_series_device\n # import nidaqmx.task as task\n data= numpy.zeros((3,1000), dtype=numpy.float64)\n \n with nidaqmx.Task() as read_task:\n read_task.ai_channels.add_ai_voltage_chan(\"Dev1/ai0:2\",\n terminal_config=nidaqmx.constants.TerminalConfiguration.RSE)\n\n read_task.timing.cfg_samp_clk_timing(1e4, active_edge=Edge.RISING,samps_per_chan=1000)\n read_task.triggers.start_trigger.cfg_dig_edge_start_trig(trigger_source=\"/Dev2/PFI0\")\n #read_task.timing.delay_from_samp_clk_delay=0\n #s=nidaqmx.stream_readers.AnalogMultiChannelReader()\n \n reader=AnalogMultiChannelReader(read_task.in_stream)\n reader.read_many_sample(data, number_of_samples_per_channel=1000,timeout=1)\n # print(data)\n return(data) \n def NIDAQ_DO():\n import nidaqmx\n from nidaqmx.constants import LineGrouping\n import pyvisa as visa\n import time\n rm = visa.ResourceManager()\n wv = rm.get_instrument(\"USB0::0x0D4A::0x000D::9148960::INSTR\")\n wv.write(':TRIGger1:SEQuence:IMMediate')\n \n with nidaqmx.Task () as task:\n task.do_channels.add_do_chan(\"Dev1/port1/line7\",\n line_grouping=LineGrouping.CHAN_PER_LINE)\n task.write(True,auto_start=True)\n# time.sleep(1)\n task.write(False,auto_start=True)\n","sub_path":"NIDAQ_plt3.py","file_name":"NIDAQ_plt3.py","file_ext":"py","file_size_in_byte":6693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"112241628","text":"import os\r\n\r\nwhile True:\r\n\ta = raw_input(\"\")\r\n\tcol = 'color ' + a\r\n\tos.system(col)\r\n'''\r\nSets the default console foreground and background colors.\r\n\r\nCOLOR [attr]\r\n\r\n attr Specifies color attribute of console output\r\n\r\nColor attributes are specified by TWO hex digits -- the first\r\ncorresponds to the background; the second the foreground. Each digit\r\ncan be any of the following values:\r\n\r\n 0 = Black 8 = Gray\r\n 1 = Blue 9 = Light Blue\r\n 2 = Green A = Light Green\r\n 3 = Aqua B = Light Aqua\r\n 4 = Red C = Light Red\r\n 5 = Purple D = Light Purple\r\n 6 = Yellow E = Light Yellow\r\n 7 = White F = Bright White\r\n\r\nIf no argument is given, this command restores the color to what it was\r\nwhen CMD.EXE started. 
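(For example, \"COLOR fc\" produces light red on bright white.)\r\n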
This value either comes from the current console\r\nwindow, the /T command line switch or from the DefaultColor registry\r\nvalue.\r\n'''\r\n","sub_path":"color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"514675203","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# @Author: Jialiang Shi\nfrom sonarqube.config import (\n API_PROJECT_PULL_REQUESTS_DELETE_ENDPOINT,\n API_PROJECT_PULL_REQUESTS_LIST_ENDPOINT\n)\n\n\nclass SonarQubepRrojectPullRequests:\n def __init__(self, sonarqube):\n self.sonarqube = sonarqube\n\n def search_project_pull_requests(self, project):\n \"\"\"\n List the pull requests of a project.\n\n :param project: Project key\n :return:\n \"\"\"\n params = {\n 'project': project\n }\n resp = self.sonarqube.make_call('get', API_PROJECT_PULL_REQUESTS_LIST_ENDPOINT, **params)\n return resp.json()\n\n def delete_project_pull_requests(self, project, pull_request_id):\n \"\"\"\n Delete a pull request.\n\n :param project: Project key\n :param pull_request_id: Pull request id\n :return:\n \"\"\"\n params = {\n 'project': project,\n 'pullRequest': pull_request_id\n }\n self.sonarqube.make_call('post', API_PROJECT_PULL_REQUESTS_DELETE_ENDPOINT, **params)\n","sub_path":"sonarqube/project_pull_requests.py","file_name":"project_pull_requests.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"385518575","text":"import re\nimport math\n\ndef chromosomeToCycle(chromosome):\n nodes = [0] * (2 * len(chromosome))\n for j in range(1, len(chromosome) + 1):\n i = int(chromosome[j-1])\n if i > 0:\n nodes[2*j - 1 - 1] = 2*i - 1\n nodes[2*j - 1] = 2*i\n else:\n nodes[2*j - 1 - 1] = -2*i\n nodes[2*j - 1] = -2*i - 1\n\n return nodes\n\n\ndef findEdge(num, edges):\n for edge in edges:\n if num in edge:\n return edge\n\n\ndef nodeToCycleInd(nodeNum):\n return int(math.ceil(nodeNum/2) - 1)\n\n\ndef findCycles_single_chrom(coloredEdges, max_val):\n cycle_ind_array = [i for i in range(max_val // 2)]\n for edge in coloredEdges:\n start, end = edge[0], edge[1]\n start_ind = nodeToCycleInd(start)\n end_ind = nodeToCycleInd(end)\n\n orig_start = cycle_ind_array[start_ind]\n orig_end = cycle_ind_array[end_ind]\n\n cycle_ind_array[start_ind] = min(cycle_ind_array[start_ind], cycle_ind_array[end_ind])\n cycle_ind_array[end_ind] = min(cycle_ind_array[start_ind], cycle_ind_array[end_ind])\n\n for i, elem in enumerate(cycle_ind_array):\n if elem == orig_start or elem == orig_end:\n cycle_ind_array[i] = cycle_ind_array[start_ind]\n\n # print(cycle_ind_array)\n\n return cycle_ind_array\n\n\ndef flipNode(num):\n flip = num - 1 if num % 2 == 0 else num + 1\n return flip\n\n\ndef nodeExists(start, edges):\n for elem in edges:\n if elem[0] == start or elem[1] == start:\n return True\n return False\n\n\ndef findNextEdge(start, edges):\n for i, elem in enumerate(edges):\n if elem[0] == start or elem[1] == start:\n return elem, i\n\n\ndef edgesToChromosomes(coloredEdges):\n chromosomes = []\n currChrom = []\n edges_copy = coloredEdges.copy()\n startEdge = edges_copy[0]\n currChrom += [startEdge[1]]\n\n start = flipNode(startEdge[1])\n\n while edges_copy:\n if nodeExists(start, edges_copy):\n startEdge, edge_ind = findNextEdge(start, edges_copy)\n del edges_copy[edge_ind]\n if start == startEdge[0]:\n currChrom += startEdge\n start = flipNode(startEdge[1])\n else:\n currChrom += startEdge[::-1]\n 
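# The edge matched on its far end: append it reversed and continue from its first node.\n                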
start = flipNode(startEdge[0])\n else:\n chromosomes.append(currChrom)\n currChrom = []\n startEdge = edges_copy[0]\n currChrom += [startEdge[1]]\n\n start = flipNode(startEdge[1])\n chromosomes.append(currChrom)\n\n return chromosomes\n\n\ndef numToStr(num):\n if num < 0:\n return str(num)\n else:\n return '+' + str(num)\n\n\ndef cycleToChromosome(nodes):\n chrom = [' '] * int(len(nodes) / 2)\n for j in range(1, len(chrom) + 1):\n if nodes[2*j - 1 - 1] < nodes[2*j - 1]:\n chrom[j - 1] = numToStr(nodes[2*j - 1] // 2)\n else:\n chrom[j - 1] = numToStr(-(nodes[2*j - 1 - 1 ] // 2))\n\n return chrom\n\n\ndef graphToGenome(coloredEdges):\n cycles = edgesToChromosomes(coloredEdges)\n chromosomes = []\n for cycle in cycles:\n chromosomes.append(cycleToChromosome(cycle))\n\n return chromosomes\n\n\nwith open('test.txt', 'r+') as f:\n graph = f.readline().rstrip()\n sp = re.split('[()]', graph)[1:-1]\n\n colored_edges = [tuple(int(x) for x in sp[i].split(', ')) for i in range(0, len(sp), 2)]\n\n genome = graphToGenome(colored_edges)\n for graph in genome:\n print('(' + ' '.join(graph) + ')', end='')","sub_path":"GraphToGenome.py","file_name":"GraphToGenome.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"41520115","text":"import os\nimport logging\nfrom datetime import datetime\nfrom django.conf import settings\nfrom django.db.utils import cached_property\nfrom django.utils import timezone\nfrom django.core.management.base import BaseCommand\nfrom cameras_app.models import Camera, Video\n\nlogger = logging.getLogger(\"video_seeker\")\n\n\nclass VideoSeeker:\n video_extensions = ('.mp4', '.flv', '.webm', '.ogg', '.avi')\n\n def __init__(self, camera):\n self.camera = camera\n\n def run(self):\n logger.info(\"Running task for path {}\".format(self.path))\n if os.path.exists(self.path) and os.path.isdir(self.path):\n self.search_new_videos()\n self.remove_old_videos()\n self.search_posters_for_old_videos()\n\n else:\n self.remove_all_videos()\n\n def search_new_videos(self):\n for video_file in self.video_files:\n if not Video.objects.filter(filename=os.path.basename(video_file)).exists():\n logger.info(\"Creating video {}\".format(video_file))\n Video.objects.create(\n camera=self.camera,\n filename=os.path.basename(video_file),\n poster=self.get_poster(video_file),\n type=os.path.splitext(video_file)[1].strip(\".\"),\n created=self.get_creation_date(video_file)\n )\n\n @cached_property\n def path(self):\n return os.path.join(settings.MEDIA_ROOT, self.camera.folder)\n\n @cached_property\n def video_files(self):\n return self.get_files(self.video_extensions)\n\n def get_files(self, extensions):\n return [os.path.join(self.path, file)\n for file in self.all_files_in_path\n if os.path.splitext(file)[1] in extensions]\n\n @cached_property\n def all_files_in_path(self):\n return os.listdir(self.path)\n\n def get_creation_date(self, file_path):\n return datetime.fromtimestamp(os.stat(file_path).st_mtime,\n tz=timezone.get_current_timezone())\n\n def get_poster(self, file):\n video_mtime = os.stat(file).st_mtime\n for jpeg, jpeg_mtime in self.jpegs_with_mtime.items():\n if int(jpeg_mtime) == int(video_mtime):\n logger.info(\"Got poster {}\".format(jpeg))\n return os.path.basename(jpeg)\n\n return \"\"\n\n @cached_property\n def jpegs_with_mtime(self):\n result = {}\n jpegs = self.get_files(extensions=['.jpg', '.jpeg'])\n for jpeg in jpegs:\n result[jpeg] = os.stat(jpeg).st_mtime\n\n return result\n\n def 
remove_old_videos(self):\n to_delete = []\n\n for video_obj in Video.objects.filter(camera=self.camera):\n if video_obj.filename not in self.all_files_in_path:\n to_delete.append(video_obj)\n\n for video_obj in to_delete:\n logger.info(\"Deleted {}\".format(video_obj))\n video_obj.delete()\n\n def remove_all_videos(self):\n logger.info(\"Removing all video records\")\n Video.objects.filter(camera=self.camera).delete()\n\n def search_posters_for_old_videos(self):\n for video_obj in Video.objects.filter(camera=self.camera, poster=\"\"):\n logger.info(\"Searching poster again for {}\".format(\n video_obj.filename))\n poster = self.get_poster(os.path.join(self.path, video_obj.filename))\n\n if poster:\n video_obj.poster = poster\n video_obj.save()\n\n\nclass Command(BaseCommand):\n help = \"Scan video files for all cameras\"\n\n def handle(self, *args, **options):\n cameras = Camera.objects.all()\n for camera in cameras:\n seeker = VideoSeeker(camera)\n seeker.run()\n","sub_path":"cameras_app/management/commands/scan_video_files.py","file_name":"scan_video_files.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57900613","text":"from math import sqrt\r\n#from functools import lru_cache\r\n\r\nIN = 'input.txt'\r\nOUT = 'output.txt'\r\n\r\n\r\ndef to_bin(x, n):\r\n r = []\r\n while x:\r\n r.append(x % 2)\r\n x //= 2\r\n\r\n l = len(r)\r\n if l < n:\r\n r += [0] * (n - l)\r\n\r\n return r\r\n\r\n\r\ndef to_base_n(n, b):\r\n r = 0\r\n for i in range(len(b)):\r\n r += b[i] * n ** i\r\n\r\n return r\r\n\r\n\r\ndef get_div(x):\r\n for i in range(2, int(sqrt(x)) + 1):\r\n if x % i == 0:\r\n return i\r\n\r\n return -1\r\n\r\n\r\ndef sol(line):\r\n n, u = map(int, line.split())\r\n n -= 2\r\n res = []\r\n k = 0\r\n for i in range(2 ** n):\r\n divs = []\r\n fail = False\r\n b = [1] + to_bin(i, n) + [1]\r\n for j in range(2, 11):\r\n x = to_base_n(j, b)\r\n d = get_div(x)\r\n if d == -1:\r\n fail = True\r\n break\r\n divs.append(d)\r\n if fail:\r\n continue\r\n res.append('{} {}'.format(\r\n ''.join(list(map(str, [1] + to_bin(i, n)[::-1] + [1]))),\r\n ' '.join(list(map(str, divs)))\r\n ))\r\n k += 1\r\n if k == u:\r\n break\r\n\r\n return '\\n' + '\\n'.join(res)\r\n\r\n\r\ndef main():\r\n with open(IN, 'r') as f, open(OUT, 'w') as g:\r\n t = int(f.readline())\r\n for i in range(t):\r\n g.write('Case #{}: '.format(i + 1))\r\n g.write(sol(f.readline().strip()))\r\n g.write('\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"codes/CodeJamCrawler/16_0_3_neat/16_0_3_devmeow_maing.py","file_name":"16_0_3_devmeow_maing.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"240313734","text":"import numpy as np\n\ndef write_mat(Rt, fp):\n for i in range(Rt.shape[0]-1):\n for j in range(Rt.shape[1]):\n fp.write(str(Rt[i,j]) + \" \")\n fp.write(\"\\n\")\n\ndef write_vec(vec, fp):\n for i in range(vec.shape[0]):\n fp.write(str(vec[i]) + \" \")\n fp.write(\"\\n\")\n\nfp = open(\"path_noise.txt\", \"w\")\n#fp = open(\"path.txt\", \"w\")\n#length = 10.0 # meters\nlength = 7.0 # meters\n#nframes = 40\nnframes = 20\n\ndelta_z = length / nframes\n\n#Rt = np.eye(4,4)\n\n# using Rodrigues rotation\nvec = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n\nfor i in range(nframes):\n #write_mat(Rt, fp)\n #Rt[2,3] += delta_z\n write_vec(vec, fp)\n vec[5] -= 
delta_z\n","sub_path":"scripts/stereo_model_sba/generate_straight_path.py","file_name":"generate_straight_path.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57781321","text":"\"\"\"\nThis module is used for testing the classifier I created with\nthe test data which was established in the extracting data file.\n\"\"\"\n\ndef test_classifier(data, greater, less):\n \"\"\"\n This function reads the test data created and compares it with the >50K and <=50K lists created from\n the targetData function. It then determines which class each attribute is closer to, giving a final output\n for accuracy based upon correct and incorrect guesses.\n\n Parameters: \n data: the source of the test data to compare to\n\n greater: the >50K list\n\n less: the <=50K list\n\n Returns: print output for correct/incorrect guesses, and overall accuracy (%)\n \"\"\"\n\n # Establish variable to track guesses\n correct_guesses = 0\n\n # Sweep through data entries to find which class the entry is closer to\n for entry in data:\n less_points = 0\n greater_points = 0\n\n # Run through each attribute in the entry and score whether or not the guess is correct\n for attribute in entry:\n if attribute == \"age\":\n if (abs(entry[attribute] - less[0]) < \\\n abs(entry[attribute] - greater[0])):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"workclass\":\n if (less[1][entry[attribute]] > \\\n greater[1][entry[attribute]]):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"educationnum\":\n if (abs(entry[attribute] - less[2]) < \\\n abs(entry[attribute] - greater[2])):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"marital\":\n if (less[3][entry[attribute]] > \\\n greater[3][entry[attribute]]):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"occupation\":\n if (less[4][entry[attribute]] > \\\n greater[4][entry[attribute]]):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"relationship\":\n if (less[5][entry[attribute]] > \\\n greater[5][entry[attribute]]):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"race\":\n if (less[6][entry[attribute]] > \\\n greater[6][entry[attribute]]):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"sex\":\n if (less[7][entry[attribute]] > \\\n greater[7][entry[attribute]]):\n less_points += 1\n else:\n greater_points += 1\n \n elif attribute == \"capitalgain\":\n if (abs(entry[attribute] - less[8]) < \\\n abs(entry[attribute] - greater[8])):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"capitalloss\":\n if (abs(entry[attribute] - less[9]) < \\\n abs(entry[attribute] - greater[9])):\n less_points += 1\n else:\n greater_points += 1\n\n elif attribute == \"hours\":\n if (abs(entry[attribute] - less[10]) < \\\n abs(entry[attribute] - greater[10])):\n less_points += 1\n else:\n greater_points += 1\n\n else:\n if entry[attribute] == \"<=50K\":\n is_less = True\n else:\n is_less = False\n\n # Define guesses both correct and incorrect\n guess = True if less_points > greater_points else False\n correct = True if guess == is_less else False\n\n if correct:\n correct_guesses += 1\n\n # Calculate the accuracy (%) based upon correct and incorrect guesses\n accuracy = (correct_guesses / len(data)) * 100\n\n # Correct formatting for output of guesses and accuracy within the main function/module\n print(\"Correct 
Guesses: \", correct_guesses)\n print(\"Incorrect Guesses: \", len(data) - correct_guesses)\n print(\"Accuracy: \"+str(round(accuracy,2))+\"%\")\n\n \n","sub_path":"Assignment 2/testClassifier.py","file_name":"testClassifier.py","file_ext":"py","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"613896278","text":"\"\"\"\nTitle: training.py\nPrescription: Training the rnn model\nAuthor: Yeol Ye\nDeclaration: The LSTM structure credits to Zhijing Li\n\"\"\"\n\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport utils\nimport pickle\nimport sys\nimport tensorflow as tf\nimport numpy as np\n\ndownsample_ratio = int(sys.argv[1])\nwindow_size = int(sys.argv[2])\npredict_size = int(sys.argv[3])\nshift_train = int(sys.argv[4])\nshift_eval = int(sys.argv[5])\nbatch_size = int(sys.argv[6])\nepochs = int(sys.argv[7])\n\ndownsample_str = 'downsample_' + str(downsample_ratio)\nwindow_predict_size = str(sys.argv[2]) + '_' + str(sys.argv[3])\nnormal_series_list_path = '../../data/dataset/' + downsample_str + '/' \\\n + 'normal_series_list_' + window_predict_size\nabnormal_series_list_path = '../../data/dataset/' + downsample_str + '/' \\\n + 'abnormal_series_list_' + window_predict_size\nfull_x_valid_path = '../../data/dataset/' + downsample_str + '/' \\\n + 'full_x_valid_' + window_predict_size\nmodel_path = '../../model/{}_{}_{}'.format(downsample_ratio, window_size,\n predict_size)\n\n##########################################################\n# Load and Process Data\n##########################################################\n# Load normal list and abnormal list of series\nwith open(normal_series_list_path, 'rb') as f:\n normal_series_list = pickle.load(f)\n\nwith open(abnormal_series_list_path, 'rb') as f:\n abnormal_series_list = pickle.load(f)\n\n# Initiate data\ntemp = normal_series_list[0].copy()\nsplit_time = int(temp.shape[0] * 0.8)\ntemp_x_train = temp[:split_time]\ntemp_x_valid = temp[split_time:]\nfull_x_valid = temp_x_valid.copy()\n\n# Initiate training and valid set\nfull_train_set = utils.windowed_dataset(temp_x_train, window_size, batch_size,\n predict_size, shift_train)\nfull_valid_set = utils.windowed_dataset(temp_x_valid, window_size, batch_size,\n predict_size, shift_eval)\n\n# Create full train set and full valid set\nfor series in normal_series_list[1:]:\n split_time = int(series.shape[0] * 0.8)\n x_train = series[:split_time]\n x_valid = series[split_time:]\n full_x_valid = np.concatenate((full_x_valid, x_valid))\n\n train_set = utils.windowed_dataset(x_train, window_size, batch_size,\n predict_size, shift_train)\n valid_set = utils.windowed_dataset(x_valid, window_size, batch_size,\n predict_size, shift_eval)\n\n full_train_set = full_train_set.concatenate(train_set)\n full_valid_set = full_valid_set.concatenate(valid_set)\n\n\n# Create full abnormal series list\nabnormal_set_list = []\nfor series in abnormal_series_list:\n abnormal_set = utils.windowed_dataset(series, window_size, batch_size,\n predict_size, shift_eval)\n abnormal_set_list.append(abnormal_set)\n\n\n# Save full_x_valid for future threshold use\nwith open(full_x_valid_path, 'wb') as f:\n pickle.dump(full_x_valid, f)\n\n\n##########################################################\n# Compile model\n##########################################################\nmodel = tf.keras.models.\\\n Sequential([tf.keras.layers.LSTM(64, return_sequences=True,\n input_shape=[None, 128]),\n tf.keras.layers.BatchNormalization(),\n 
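# Two more LSTM blocks follow; the last drops return_sequences to emit a single vector.\n                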
tf.keras.layers.LSTM(64, return_sequences=True),\n                tf.keras.layers.BatchNormalization(),\n                tf.keras.layers.LSTM(64, return_sequences=False),\n                tf.keras.layers.Dense(128 * predict_size),\n                tf.keras.layers.Reshape((predict_size, 128))])\n\nes = EarlyStopping(monitor='mse',\n                   min_delta=0.0001,\n                   patience=5)\n\n# compile() does not accept callbacks; they are passed to fit() below\nmodel.compile(loss='mean_squared_error',\n              optimizer='adam',\n              metrics=[\"mse\"])\n\n##########################################################\n# Fit model\n##########################################################\nhistory = model.fit(full_train_set, epochs=epochs, callbacks=[es])\nmodel.save(model_path)\n\n\n##########################################################\n# Model Validation and Evaluation\n##########################################################\nprint('Validate model on valid set (using normal data):')\nmodel.evaluate(full_valid_set)\n\nprint('Evaluate model on test set (using abnormal data):')\nfor i, abnormal_set in enumerate(abnormal_set_list):\n    print('Abnormal set: ', i)\n    print(model.evaluate(abnormal_set))\n\nprint('Training finished!')\n","sub_path":"code/model/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"406195049","text":"import FWCore.ParameterSet.Config as cms\nimport sys\n\ndef getInstanceName(obj, pyNameSpace = None, process = None):\n    if process is not None:\n        return obj.label()\n    else:\n        if pyNameSpace is not None:\n            for name, ref in pyNameSpace.items():\n                if ref is obj : return name\n        else:\n            for pyModule in sys.modules.values():\n                for name, ref in pyModule.__dict__.items():\n                    if ref is obj : return name\n        return None\n    \n\nclass CutSequenceProducer(cms._ParameterTypeBase):\n    #init functions get the input collection to the cut sequence\n    def __init__(self,firstSource,pyModuleName,pyNameSpace) :\n        self.input = firstSource\n        self.sequence = None\n        self.pyModuleName = pyModuleName,\n        self.pyNameSpace = pyNameSpace \n        self.inputLabel='src'\n\n    def changeInput(self,src):\n        self.input=src\n\n    def changeInputLabel(self,sourceLabel):\n        self.inputLabel=sourceLabel\n\n    #Creates a selection/filter/counter sequence \n    def addCut(self,module,summaryText = None,minFilter = 1, maxFilter = 9999):\n\n\n        #try to find the name of the module\n        moduleName=getInstanceName(module,self.pyNameSpace)\n\n        if(moduleName != None):\n            \n            #Set the correct source\n            setattr(module,self.inputLabel,cms.InputTag(self.input))\n            #Add module to the sequence\n            if self.sequence == None:\n                self.sequence=module\n            else:\n                self.sequence*=module\n\n            self.input=moduleName \n            #Create the Filter\n            if minFilter>0 or maxFilter<9998:\n                filter = cms.EDFilter(\"PATCandViewCountFilter\")\n                filter.minNumber = cms.uint32(minFilter)\n                filter.maxNumber = cms.uint32(maxFilter)\n                filter.src = cms.InputTag(moduleName)\n                filterName = moduleName+'Filter'\n                filter.setLabel(filterName) \n                #Register the filter in the namespace\n                pyModule = sys.modules[self.pyModuleName[0]]\n                if pyModule is None:\n                    raise ValueError(\"'pyModuleName' Parameter invalid\")\n                setattr(pyModule,filterName,filter)\n                self.sequence*=filter\n\n            #now the counter (skip when summaryText is None or empty)\n            if summaryText:\n                counter = cms.EDFilter(\"EventCounter\")\n                counter.name=cms.string(summaryText)\n                counterName = moduleName+'Counter'\n                counter.setLabel(counterName)\n                pyModule = sys.modules[self.pyModuleName[0]]\n                if pyModule is None:\n                    raise ValueError(\"'pyModuleName' Parameter invalid\")\n                
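# Register the counter on the calling module so configs can refer to it by name.\n                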
setattr(pyModule,counterName,counter)\n self.sequence*=counter\n\n def returnSequence(self):\n return cms.Sequence(self.sequence)\n \n","sub_path":"RecoTools/python/tools/CutSequenceProducer.py","file_name":"CutSequenceProducer.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"537101993","text":"from math import sqrt\n\ndef upsqrt(x):\n k=sqrt(x)\n res=1\n while res>1\n\n\nclass Nazoki:\n def __init__(self,universe):\n self.u=1\n while self.u 2:\n if self.high(x) not in self.cluster:\n self.cluster[self.high(x)]=Nazoki(botsqrt(self.u))\n if self.summary==\"NIL\":\n self.summary=Nazoki(upsqrt(self.u))\n self.summary.insert(self.high(x))\n self.cluster[self.high(x)].empinsert(self.low(x))\n else:\n self.cluster[self.high(x)].insert(self.low(x))\n if x > self.max:\n self.max = x\n\n def delete(self,x):\n if self.min == self.max:\n self.min = self.max =\"NIL\"\n return True\n elif self.u==2:\n if x==0:\n self.min=1\n else:\n self.min=0\n self.max = self.min\n return False\n else:\n if x == self.min:\n fc = self.summary.min\n x = self.index(fc,self.cluster[fc].min)\n self.min=x\n k=self.cluster[self.high(x)].delete(self.low(x))\n if k:\n del self.cluster[self.high(x)]\n kk=self.summary.delete(self.high(x))\n if x == self.max:\n if kk:\n self.max = self.min\n else:\n sm = self.summary.max\n self.max = self.index(sm,self.cluster[sm].max)\n elif x == self.max:\n self.max = self.index(self.high(x),self.cluster[self.high(x)].max)\n\n\n\n\n def successor(self,x):\n if self.u == 2:\n if x == 0 and self.max ==1:\n return 1\n else:\n return \"NIL\"\n elif self.min!=\"NIL\" and x < self.min:\n return self.min\n else:\n ml = \"NIL\"\n if self.high(x) in self.cluster:\n ml = self.cluster[self.high(x)].max\n if ml != \"NIL\" and self.low(x) < ml:\n offset = self.cluster[self.high(x)].successor(self.low(x))\n return self.index(self.high(x),offset)\n else:\n sc = \"NIL\"\n sc = self.summary.successor(self.high(x))\n if sc ==\"NIL\":\n return \"NIL\"\n else:\n offset = self.cluster[sc].min\n return self.index(sc,offset)\n\n\n def predecessor(self,x):\n if self.u == 2:\n if x == 1 and self.min == 0:\n return 0\n else:return \"NIL\"\n elif self.max != \"NIL\" and x > self.max:\n return self.max\n else:\n ml = \"NIL\"\n if self.high(x) in self.cluster:\n ml=self.cluster[self.high(x)].min\n if ml != \"NIL\" and self.low(x) > ml:\n offset = self.cluster[self.high(x)].predecessor(self.low(x))\n return self.index(self.high(x),offset)\n else:\n pc = \"NIL\"\n if self.summary != \"NIL\" :\n pc = self.summary.predecessor(self.high(x))\n if pc == \"NIL\":\n if self.min != \"NIL\" and x > self.min:\n return self.min\n else:\n return \"NIL\"\n else:\n offset = self.cluster[pc].max\n return self.index(pc,offset)\n","sub_path":"library/nazo_tree.py","file_name":"nazo_tree.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"468764021","text":"import argparse\nfrom typing import Union, TypeVar, Optional, Any, Dict, List\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom common import models\nfrom common.config import *\n\n\n\ndef get_postgres_conn():\n \"\"\"\n Return PostgreSQL engine.\n \"\"\"\n\n return create_engine(\n f\"postgresql://{POSTGRES_USER}:{POSTGRES_PASS}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_NAME}?sslmode=disable\",\n client_encoding='utf8'\n )\n\n\ndef 
get_postgres_session():\n    \"\"\"\n    Return PostgreSQL SQLAlchemy session.\n    \"\"\"\n\n    engine = get_postgres_conn()\n    session = sessionmaker(bind=engine)()\n\n    return session\n\n\ndef init_db() -> None:\n    \"\"\"\n    Create all tables described in models.py\n    \"\"\"\n\n    engine = get_postgres_conn()\n    models.Base.metadata.create_all(engine)\n\n\ndef drop_db() -> None:\n    \"\"\"\n    Drop all tables described in models.py\n    \"\"\"\n\n    engine = get_postgres_conn()\n    models.Base.metadata.drop_all(engine)\n\n\ndef drop_table(table: str) -> None:\n    \"\"\"\n    Example:\n        >>> drop_table('account')\n    \"\"\"\n\n    engine = get_postgres_conn()\n\n    ItemModel = get_item_model(table)\n    if ItemModel:\n        ItemModel.__table__.drop(engine)\n\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(prog='PROG', description='description')\n    parser.add_argument('-t', '--task', help=\"Task name.\")\n    parser.add_argument('--table', help=\"Table name\")\n\n    args = vars(parser.parse_args())\n    task = args.pop('task')\n\n    # Remove empty variables\n    non_empty_args = dict([(k, args[k]) for k in args if args[k]])\n\n    locals()[task](**non_empty_args)\n","sub_path":"common/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"55788917","text":"# (c) 2015 Magnus \"Tuxie\" Johnsson, magnusjjj@gmail.com\n# Licensed under the BSD license, see LICENSE.TXT in the root folder.\n# Revision 1\n# Changelog:\n# 2015-04-14 - Magnus Johnsson - Added the license header\n\n# This file handles what url's are mapped to what view.\n# This is much better explained in the Django manual,\n# and we don't do anything freakier than what is explained in the tutorials. Promise <3\n# https://docs.djangoproject.com/en/1.8/topics/http/urls/\n\nfrom django.conf.urls import patterns, url\n\nfrom server import views\n\nurlpatterns = patterns('',\n    url(r'^$', views.ViewIndex.as_view(), name='index'),\n\turl(r'^servers/', views.ViewServerList.as_view(), name='serverlist'),\n\turl(r'^volunteer/(?P\\d+)/$', views.ViewVolunteerFor.as_view(), name='volunteer'),\n\turl(r'^editserver/(?P\\d+)/$', views.EditServer.as_view(), name='editserver'),\n\turl(r'^newserver', views.EditServer.as_view(), name='newserver'),\n\turl(r'^managevolunteers/(?P\\d+)/$', views.ViewManageVolunteers.as_view(), name='managevolunteers'),\n\turl(r'^servereditapplicant$', views.ServerEditApplicant.as_view(), name='servereditapplicant'),\n\turl(r'^serverviewanswers$', views.ServerViewAnswers.as_view(), name='serverviewanswers'),\n\turl(r'^uploadserverimage/(?P\\d+)/$', views.UploadServerImage.as_view(), name='uploadserverimage'),\n\turl(r'^updateserverinfo/(?P\\d+)/$', views.UpdateServerInfo.as_view(), name='updateserverinfo'),\n\turl(r'^addpage/(?P\\d+)/$', views.AddPage.as_view(), name='addpage'),\n\turl(r'^deletepage/(?P\\d+)/$', views.DeletePage.as_view(), name='deletepage'),\n\turl(r'^editpage/(?P\\d+)/$', views.EditPage.as_view(), name='editpage'),\n\turl(r'^rocketchatcreateserverapi$', views.rocketchatcreateserverapi.as_view(), name='rocketchatcreateserverapi'),\n\turl(r'^(?P[a-zA-Z\\-]+)/$', views.ViewDetail.as_view(), name='detail'),\n\turl(r'^(?P[a-zA-Z\\-]+)/(?P[a-zA-Z\\-]+)$', views.ViewDetail.as_view(), name='detail'),\n)\n","sub_path":"server/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"165766832","text":"\"\"\"\n    A 
module for the implementation of the ZigBee PRO protocol\n\"\"\"\nimport logging\nfrom logging.config import fileConfig\nimport yaml\nfrom collections import namedtuple\nimport asyncio\nimport ember\nfrom ember_utils import TooManyTriesException\nfrom ember_utils import UnexpectedStatusException\nfrom ember_utils import print_dic\nfrom constants import ALL_CHANNEL_MASK\nfrom constants import ENABLE_JOINING\nimport ezsp_types as e\nfrom random import getrandbits\n\nLOGGER = logging.getLogger(__name__)\nfileConfig('logging_config.ini', disable_existing_loggers=False)\n\nclass Cinder():\n    \"\"\"A class implementing the ZigBee protocol with an NCP running the\n    EmberZnet Pro stack\"\"\"\n\n    _stack_status = None\n\n    def __init__(self):\n        self._is_ready_future = asyncio.Future()\n        self.api = ember.Ember('/dev/ttyUSB0', baudrate=115200)\n        self.api.add_callback(0x19, self.cb_stack_status)\n        self.api.add_callback(0x23, self.cb_joining)\n        self.api.add_callback(0x45, self.cb_incoming_message)\n        self.network_address = None\n        self.mac_address = None\n\n        from constants import CONFIGURATION_FILE\n        try:\n            with open(CONFIGURATION_FILE) as f:\n                config = yaml.safe_load(f)\n        except IOError:\n            LOGGER.warning(\"Failed to find the EZSP configuration file\")\n            self.configuration = {}\n        else:\n            self.configuration = config['cinder']\n\n    def is_ready(self):\n        \"\"\"Returns a future that indicates when the init process is over\"\"\"\n        return self._is_ready_future\n\n    async def _check_network_parameters(self, panid, epanid, channel):\n        \"\"\"Checks whether the NCP is connected with the right network\n        parameters\"\"\"\n        answer = await self.api.get_network_parameters()\n        parameters = answer['parameters']\n        if answer['nodeType'] != e.EmberNodeType.EMBER_COORDINATOR:\n            return False\n        if panid and parameters['panId'] != panid:\n            return False\n        if epanid and parameters['extendedPanId'] != epanid:\n            return False\n        if channel and parameters['radioChannel'] != channel:\n            return False\n        parameters_tuple = namedtuple(\"parameters\", ['panid',\n                                                     'epanid',\n                                                     'channel'])\n        return parameters_tuple(parameters['panId'],\n                                parameters['extendedPanId'],\n                                parameters['radioChannel'])\n\n    async def _wait_for_stack_status(self, status):\n        from constants import STACK_STATUS_CHANGE_TIMEOUT as timeout\n        for seconds in range(timeout):\n            if self._stack_status == status:\n                return\n            await asyncio.sleep(1)\n        raise TimeoutError\n\n    async def network_connect(\n            self,\n            panid=None,\n            epanid=None,\n            channel=None,\n            join_method=e.EmberJoinMethod.EMBER_USE_MAC_ASSOCIATION):\n        \"\"\"Connects to a ZigBee network with the given parameters\"\"\"\n        # Rejoin the network\n        try:\n            await self.api.network_init()\n            parameters = await self._check_network_parameters(\n                panid,\n                epanid,\n                channel)\n            if parameters:\n                LOGGER.info(f\"Rejoined on {parameters.panid:X} \"\n                            + f\"as coordinator \"\n                            + f\"({parameters.epanid:X}/{parameters.channel})\")\n                self.network_address = (await self.api.get_node_id())['nodeId']\n                self.mac_address = (await self.api.get_eui64())['eui64']\n                return\n            else:\n                await self.api.leave_network()\n                LOGGER.debug(\"Waiting for disconnection...\")\n                await self._wait_for_stack_status('EMBER_NETWORK_DOWN')\n\n        except UnexpectedStatusException as status:\n            if str(status) == 'EMBER_NOT_JOINED':\n                LOGGER.info(\"No network to rejoin.\")\n            else:\n                raise status\n\n        # No proper network to rejoin, creating one\n        if not panid:\n            from constants import PAN_ID_BIT_SIZE\n            panid = getrandbits(PAN_ID_BIT_SIZE)\n        if not epanid:\n            from constants import EPAN_ID_BIT_SIZE\n            
epanid = getrandbits(EPAN_ID_BIT_SIZE)\n if not channel:\n from constants import DEFAULT_RADIO_CHANNEL\n channel = DEFAULT_RADIO_CHANNEL\n\n bitmask = 'EMBER_TRUST_CENTER_GLOBAL_LINK_KEY'\n bitmask += '|EMBER_HAVE_NETWORK_KEY'\n bitmask += '|EMBER_HAVE_PRECONFIGURED_KEY'\n bitmask += '|EMBER_REQUIRE_ENCRYPTED_KEY'\n from constants import PRECONFIGURED_NETWORK_KEY\n preconfigured = PRECONFIGURED_NETWORK_KEY\n from constants import NETWORK_KEY_BIT_SIZE\n network_key = getrandbits(NETWORK_KEY_BIT_SIZE) # TODO: maybe 0??\n\n await self.api.set_initial_security_state({\n \"bitmask\": bitmask,\n \"preconfiguredKey\": preconfigured,\n \"networkKey\": network_key,\n \"networkKeySequenceNumber\": 0,\n \"preconfiguredTrustCenterEui64\": 0})\n\n await self.api.form_network({\n 'extendedPanId': epanid,\n 'panId': panid,\n 'radioTxPower': 0,\n 'radioChannel': channel,\n 'joinMethod': join_method,\n 'nwkManagerId': 0,\n 'nwkUpdateId': 0,\n 'channels': 134215680})\n\n LOGGER.debug(\"Waiting for connection...\")\n await self._wait_for_stack_status('EMBER_NETWORK_UP')\n\n self.network_address = (await self.api.get_node_id())['nodeId']\n self.mac_address = (await self.api.get_eui64())['eui64']\n LOGGER.info(f\"Created {panid:X} ({epanid:X}/{channel})\")\n\n async def ncp_configure(self):\n \"\"\"Configures the NCP\"\"\"\n # try:\n # await self.api.set_value(\n # e.EzspValueId.EZSP_VALUE_STACK_TOKEN_WRITING,\n # 1)\n # except UnexpectedStatusException:\n # pass\n # token = e.EzspValueId.EZSP_VALUE_STACK_TOKEN_WRITING\n # LOGGER.info(await self.api.get_value(token))\n for policy_id, policy_value in self.configuration['policy'].items():\n await self.api.set_policy(policy_id, policy_value)\n\n async def network_scan(self, channels=ALL_CHANNEL_MASK):\n \"\"\"Scan for available devices\"\"\"\n await self.api.start_scan('EZSP_ACTIVE_SCAN', ALL_CHANNEL_MASK, 5)\n\n async def permit_joining(self, duration=ENABLE_JOINING):\n await self.api.permit_joining(duration)\n\n async def process(self):\n \"\"\"Background process\"\"\"\n await self.api.process(self._is_ready_future)\n\n async def print_current_config(self):\n \"\"\"Prints the value of valid config fields\"\"\"\n for _, config in e.EzspConfigId.decmapping.items():\n value = await self.api.get_configuration_value(config)\n LOGGER.info(f\"{config} == {value}\")\n\n def cb_stack_status(self, args):\n LOGGER.info(f\"Stack status: {self._stack_status} → {args['status']}\")\n self._stack_status = args['status']\n def cb_joining(self, args):\n LOGGER.info(\"Joining: \")\n for entry in print_dic(args):\n LOGGER.info(entry)\n\n def cb_incoming_message(self, args):\n LOGGER.info(\"Incoming: \")\n for entry in print_dic(args):\n LOGGER.info(entry)\n\n\ncinder = Cinder()\n\n\nasync def main_process():\n await cinder.is_ready()\n # await self.print_current_config()\n await cinder.network_connect(panid=0xBABF)\n msg = f\"Connected as 0x{cinder.network_address:02X}\"\n msg += f\" (0x{cinder.mac_address:08X})\"\n LOGGER.info(msg)\n await cinder.ncp_configure()\n # await cinder.network_scan()\n await cinder.permit_joining()\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n\n coro = asyncio.gather(\n cinder.process(),\n main_process(),\n )\n try:\n loop.run_until_complete(coro)\n except IOError as e:\n LOGGER.error(f\"Connexion lost: {str(e)}\")\n except OSError:\n LOGGER.error(\"Failed to connect to the NCP\")\n # except Exception as e:\n # LOGGER.error(\"Exiting {}\".format(e))\n\n # try:\n # loop.stop()\n # loop.close()\n # except RuntimeError:\n # 
LOGGER.error(\"Forcing tasks to shutdown\")\n\n LOGGER.info(\"Done\")\n","sub_path":"src/cinder/cinder.py","file_name":"cinder.py","file_ext":"py","file_size_in_byte":8493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153693382","text":"QuantidadeDeNumeros = int(input(\"Quantos numeros serao comparados? \"))\nPrimeiroNumero = float(input(\"Qual seu numero? \"))\nMaiorValor = PrimeiroNumero\nMenorValor = PrimeiroNumero\nSomaForaDoWhile = 0\nSomaDentroDoWhile = 0\ncont = 1\nwhile cont < QuantidadeDeNumeros:\n Numero = float(input(\"Qual seu numero? \"))\n if Numero > MaiorValor:\n MaiorValor = Numero\n if Numero < MenorValor:\n MenorValor = Numero\n cont = cont + 1\n SomaDentroDoWhile = SomaDentroDoWhile + Numero\nSomaForaDoWhile = SomaDentroDoWhile + PrimeiroNumero \nprint(\"Seu menor valor foi\",MenorValor,\"seu maior valor foi\",MaiorValor,\"e sua soma foi\",SomaForaDoWhile)","sub_path":"Lista 3/lista03ex14.py","file_name":"lista03ex14.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"443613602","text":"#!/usr/bin/env python\nfrom numpy import *\nimport sys\n\n# read velocites from an .xyz-file and store them to separate\n# files for easier access\n\n\ntimestep = int(sys.argv[2])\nN = 8*8*8*4\n\nfilename = sys.argv[1]\nf = open(filename, 'r')\ndata = f.readlines()\nvx = []\nvy = []\nvz = []\nfor line in data[(timestep-1)*N + 2*timestep : (timestep-1)*N + 2*timestep + N]:\n numbers = line.split()\n vx.append(numbers[4]) \n vy.append(numbers[5])\n vz.append(numbers[6])\n\nxfile = open('vxdata.dat', 'w')\nyfile = open('vydata.dat', 'w')\nzfile = open('vzdata.dat', 'w')\n\nfor i in vx:\n xfile.write(i + \"\\n\")\nfor j in vy:\n yfile.write(j + \"\\n\")\nfor k in vz:\n zfile.write(k + \"\\n\")\n\n","sub_path":"FYS4460/prosjekt1/velocities/readfile.py","file_name":"readfile.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"432304036","text":"import html2text\nimport datetime\nfrom flask import render_template\nfrom flask_mail import Message\nfrom webapp import mail, models, db\nfrom webapp.common import random_pwd\n\n\ndef send_email(recipients, template, template_ctx, subject=None, funnel_stream_id=None):\n msg = Message()\n\n # find recipients and check if they are unsubscribed\n for recipient in recipients:\n user = models.User.query.filter_by(email=recipient).first()\n if not user:\n user = models.UserLegacy.query.filter_by(email=recipient).first()\n if not user:\n user = models.UserLegacy(email=recipient)\n db.session.add(user)\n if user.unsubscribed:\n # Just log that this email is not sent due to unsubscribed status and return\n sent_email = models.SentEmail(timestamp=datetime.datetime.utcnow(),\n recipients=str(recipients),\n subject=\"USER HAS UNSUBSCRIBED\")\n db.session.add(sent_email)\n db.session.commit()\n return\n if not user.unsubscribe_token:\n user.unsubscribe_token = random_pwd(26)\n db.session.add(user)\n db.session.commit()\n template_ctx['unsubscribe_token'] = user.unsubscribe_token\n\n # generate tracking pixel\n tracking_pixel_id = random_pwd(26)\n template_ctx['tracking_pixel_id'] = tracking_pixel_id\n\n # set up email\n msg.subject = subject\n msg.recipients = recipients\n msg.html = render_template(template, **template_ctx)\n msg.body = html2text.html2text(msg.html)\n\n # Actually send email\n mail.send(msg)\n\n # 
find if this is for a shop\n    shop_id = None\n    if template == 'email/review_order.html':\n        if 'shop_name' in template_ctx:\n            shop_name = template_ctx['shop_name']\n            shop = models.Shop.query.filter_by(name=shop_name).first()\n            if shop:\n                shop_id = shop.id\n\n    # LOG\n    sent_email = models.SentEmail(timestamp=datetime.datetime.utcnow(),\n                                  recipients=str(recipients),\n                                  subject=subject,\n                                  template=template,\n                                  template_ctx=str(template_ctx),\n                                  body=msg.body,\n                                  tracking_pixel_id=tracking_pixel_id,\n                                  for_shop_id=shop_id,\n                                  funnel_stream_id=funnel_stream_id)\n\n    db.session.add(sent_email)\n    db.session.commit()\n","sub_path":"async/email_sender.py","file_name":"email_sender.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"269438136","text":"# Manual rebalance strategy: Create new shard servers manually via /scripts/shard.sh\n# and master runs this script when it receives a rebalance request\n\n# NOTE: Might have some complex issues due to rebalancing while accepting requests\n\nimport base64\nimport sys\nimport lmdb\nimport hashlib\nimport requests\n\ndef shard_put(filepath, data):\n    try:\n        req = requests.put(filepath, data)\n        # 201 is a new file, 204 is an overwrite\n        response_code = req.status_code in [201, 204]\n    except Exception as e:\n        print(e)\n        return False\n    return response_code\n\ndef shard_delete(filepath):\n    try:\n        req = requests.delete(filepath)\n        ret = req.status_code == 204\n    except Exception:\n        return False\n    return ret\n\ndef key_to_shard(key, shards):\n    # place the key on a shard derived from its md5 digest\n    md5 = hashlib.md5(key.encode('utf-8')).digest()\n    return shards[md5[0] % len(shards)]\n\ndef key_to_path(key):\n    md5 = hashlib.md5(key.encode('utf-8')).digest()\n    # encode the value into a base64 string as the file name. Inside\n    # the file is the unencoded value\n    b64key = base64.b64encode(key.encode('utf-8')).decode('utf-8')\n\n    return \"/%02x/%02x/%s\" % (md5[0], md5[1], b64key)\n\n# Example call: python3 rebalance.py localhost:3001,localhost:3002 /tmp/cachedb\n\nif __name__ == '__main__':\n    shards = sys.argv[1].split(',')\n    cachedir = sys.argv[2]\n\n    fc = lmdb.open(cachedir)\n    if not fc:\n        sys.exit('Unable to locate lmdb dump in %s. Aborting.' % cachedir)\n    \n    # since we use consistent hashing for sharding, this rebalance operation\n    # is quite costly. Re-hashing each existing key is likely going to put it\n    # in a different location\n\n    with fc.begin(write=True) as txn:\n        cursor = txn.cursor()\n        # Iterate over all current key/values and rehash\n        moved, total = 0, 0\n        for key, value in cursor:\n            # lmdb yields bytes; the cache maps key -> current shard location URL\n            key = key.decode('utf-8')\n            value = value.decode('utf-8')\n            remote = key_to_shard(key, shards)\n            path = key_to_path(key)\n            # unlink file in old shard and place it in the new location if\n            # the hash is different. do nothing otherwise\n            new_shard_loc = 'http://%s%s' % (remote, path)\n            if new_shard_loc != value:\n                # fetch the file content from its old location\n                print('Moving key %s at location %s...' % (key, value))\n\n                content = requests.get(value).text\n                shard_put(new_shard_loc, content)\n                shard_delete(value)\n                txn.put(key.encode('utf-8'), new_shard_loc.encode('utf-8'))\n\n                print('New key %s at location %s' % (key, new_shard_loc))\n                moved += 1\n            else:\n                print('Key %s is unchanged' % (key))\n            total += 1\n        print('%d out of %d were redistributed.' 
% (moved, total))","sub_path":"src/rebalance.py","file_name":"rebalance.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"613907904","text":"__author__ = 'Sai'\n\n# API Keys: 658602f30bc7c402cda256632cac6c7f\n# Request format: https://proapi.whitepages.com/2.0/business.json?name=toyota&city=Seattle&api_key=KEYVAL\n\nimport sys\nimport requests\n\n\nclass WhitePages:\n\n name = sys.argv[1]\n city = sys.argv[2]\n state_code = sys.argv[3]\n\n def businessSearch(self):\n\n if(self.state_code is not None and self.city is not None and self.name is not None):\n\n req = \"https://proapi.whitepages.com/2.0/business.json?name=\"+self.name+\"&city=\"+self.city+\"&state_code=\"+self.state_code+\"&api_key=658602f30bc7c402cda256632cac6c7f\"\n res = requests.get(req)\n\n #print(res.text)\n print(\"\")\n response = res.json()\n\n key1 = ''.join(str(response['results'][0]))\n name = response['dictionary'][key1]['name']\n key2 = ''.join(str(response['dictionary'][key1]['locations'][0]['id']['key']))\n address1 = response['dictionary'][key2]['standard_address_line1']\n address2 = response['dictionary'][key2]['standard_address_line2']\n location = response['dictionary'][key2]['standard_address_location']\n address = address1 + \" \" +address2 + \" \" +location\n key3 = response['dictionary'][key1]['phones'][0]['id']['key']\n phone = response['dictionary'][key3]['phone_number']\n results = {'name':name,'address':address,'phone':phone}\n\n print(results)\n\n print(\"\")\n\nx = WhitePages()\nx.businessSearch()\n\n","sub_path":"APIs/WhitePages.py","file_name":"WhitePages.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"615274677","text":"import pymysql as PyMySQL\n\n\ndef open_connection():\n db = PyMySQL.connect(\"localhost\", \"root\", \"mysql\", \"edubot\")\n return db\n\n\ndef close_connection(db):\n db.close()\n print(\"db connection closed\")\n\n\ndef getID(db, cursor):\n num = 0\n sql = \"SELECT max(id) FROM noteshistory\"\n id = 0\n try:\n cursor.execute(sql)\n num = cursor.fetchone()\n id = num[0] + 1\n db.commit()\n except:\n db.rollback()\n return id\n\n\ndef insert_into_note_list(title, body, date, status):\n db = open_connection()\n cursor = db.cursor()\n id = getID(db, cursor)\n sql = \"INSERT INTO noteshistory (id, title, text, time, status) VALUES (\" + str(\n id) + \", '\" + str(title) + \"', '\" + str(body) + \"', '\" + str(date) + \"', '\" + str(status) + \"')\"\n try:\n cursor.execute(sql)\n db.commit()\n except:\n db.rollback()\n close_connection(db)\n\n\ndef select_all_notes():\n db = open_connection()\n cursor = db.cursor()\n sql = \"SELECT * FROM noteshistory ORDER BY id DESC\"\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n print(result)\n print(\"\\n\")\n d = dict()\n keys = ['id', 'title', 'text', 'time', 'status']\n ret = ([dict(zip(keys, row)) for row in result])\n db.commit()\n return ret\n except:\n db.rollback()\n close_connection(db)\n\n\ndef select_note(id):\n db = open_connection()\n cursor = db.cursor()\n sql = \"SELECT * FROM noteshistory WHERE id=\" + str(id)\n try:\n cursor.execute(sql)\n result = cursor.fetchall()\n print(result)\n d = dict()\n keys = ['id', 'title', 'text', 'time', 'status']\n print([dict(zip(keys, row)) for row in result])\n db.commit()\n except:\n db.rollback()\n close_connection(db)\n\n\ndef update_record(id, title, text):\n db = 
open_connection()\n    cursor = db.cursor()\n    sql = \"UPDATE noteshistory SET title='\" + title + \"', text='\" + text + \"' WHERE id=\" + str(id)\n    try:\n        cursor.execute(sql)\n        db.commit()\n    except:\n        db.rollback()\n    close_connection(db)\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"301715412","text":"################################## #\n# Script to write into a text file !!! #\n# ################################## #\n# Create the variable \"MonFichier\", which holds the path of the target file.\n# We use the \"w\" option to write.\nMonFichier = open('txt/MailSeul.txt', 'w')\n# Then we use the \"write()\" method to write the text we want.\nMonFichier.write('Bienvenue à l\\'IUT de Châteauroux !!!')\n# Then we close the file.\nMonFichier.close()\n\n\n# ################################## #\n# Script to read an ALREADY existing file !!! #\n# ################################## #\n# Create the variable \"MonFichier\", which holds the path of the target file.\n# We use the \"r\" option to read it.\n# However, this option is the default anyway.\nMonFichier = open('txt/MailSeul.txt', 'r')\n# Then we use the variable \"ContenuFichier\" to read the file's\n# contents, using the \"read()\" method.\nContenuFichier = MonFichier.read()\n# Then we close the file.\nMonFichier.close()\n# If you do not add this line, nothing will be displayed in the window.\nprint(ContenuFichier)\n","sub_path":"create-readfile.py","file_name":"create-readfile.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"587236783","text":"# Write your solution for 1.4 here!\n\ndef is_prime(x):\n\t# numbers below 2 are not prime\n\tif x < 2:\n\t\treturn False\n\tfor i in range(2, x):\n\t\tif x % i == 0:\n\t\t\treturn False\n\treturn True\n\n\nprint(is_prime(2))","sub_path":"exercises/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"630996832","text":"import gym\nfrom gym.envs.registration import register\n\nfrom stable_baselines3 import PPO\nfrom stable_baselines3.common.env_util import make_vec_env\nfrom stable_baselines3.ppo.policies import MlpPolicy\n\nfrom datetime import datetime\n\nnow = datetime.now()\ncurrent_time = now.strftime(\"%d_%m_%Y_%H_%M_%S\")\nfilename = \"ppo_warehouse_sort_\"+ str(current_time)\nprint(\"Model will be saved at \", filename)\nclass CustomPolicy(MlpPolicy):\n    def __init__(self, *args, **kwargs):\n        super(CustomPolicy, self).__init__(*args, **kwargs,\n                                           net_arch=[64, 64, dict(pi=[64, 64], vf=[64, 64])])\n\n    def _get_torch_save_params(self):\n        state_dicts = [\"policy\", \"policy.optimizer\", \"policy.lr_scheduler\"]\n\n        return state_dicts, []\n\nregister(\n    id='warehouse-sort-v0', \n    entry_point='gym_multigrid.envs:WarehouseSortEnvN1'\n)\n\nenv = gym.make('warehouse-sort-v0')\n\nmodel = PPO(CustomPolicy, env, verbose=1)\nmodel.learn(total_timesteps=10000)\nmodel.save(filename)\n\nobs = env.reset()\nwhile True:\n    action, _states = model.predict(obs)\n    obs, rewards, dones, info = env.step(action)\n    env.render()\n\n    if dones:\n        
env.reset()","sub_path":"ppo_train.py","file_name":"ppo_train.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"82016091","text":"\"\"\"\r\nPre-processing data:\r\n\r\n* input: .csv file\r\n* output: .csv file\r\n\r\n1. Features:\r\n - Drop RISK_MM\r\n - Separate 'Date' columns into 3 new columns: 'Year', 'Month', 'Date'\r\n2. Missing data:\r\n - Numeric: replaced by mean value.\r\n - Nominal (categorical): replaced by popular string.\r\n - Drop null (option)\r\n3. Outliers:\r\n - Using IQR\r\n - Z-score (option)\r\n4. String to int (categorical columns):\r\n - One hot encoding\r\n - Label encoder\r\n5. Normalization\r\n - Min-max\r\n - Standard (option)\r\n\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom scipy import stats\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\r\n\r\n\r\nclass Preprocessing:\r\n\r\n def __init__(self, output_file):\r\n self.output_file = output_file\r\n\r\n def preprocessing(self, input_file):\r\n data = pd.read_csv(input_file)\r\n\r\n # drop features\r\n data = data.drop(columns=['RISK_MM'])\r\n\r\n # remove 'Date' column\r\n # create 3 new columns: date, month, year\r\n data['Date'] = pd.to_datetime(data['Date'])\r\n data['Year'] = data['Date'].dt.year\r\n data['Month'] = data['Date'].dt.month\r\n data['Day'] = data['Date'].dt.day\r\n data.drop('Date', axis=1, inplace=True)\r\n\r\n # separate columns to numeric and nominal(categorical)\r\n numeric = []\r\n nominal = []\r\n list_columns = data.columns.tolist()\r\n for i in list_columns:\r\n if data[i].dtypes == 'float64' or data[i].dtypes == 'int64':\r\n numeric.append(i)\r\n else:\r\n nominal.append(i)\r\n\r\n # with null data in numeric columns, we can replace it by mean value of column\r\n for i in numeric:\r\n mean = data[i].mean()\r\n data[i].replace(to_replace=np.nan, value=mean, inplace=True)\r\n\r\n # with null data in nominal columns, we can replace it by popular string\r\n for i in nominal:\r\n popular_str = data[i].mode()[0]\r\n data[i].replace(to_replace=np.nan, value=popular_str, inplace=True)\r\n\r\n # another method to deal with null data -> drop all null data\r\n # drop_null\r\n # data = data.dropna()\r\n\r\n # outliers\r\n # create a list include features that maybe contain outliers\r\n outliers = ['Rainfall', 'WindSpeed9am', 'WindSpeed3pm', 'Evaporation']\r\n\r\n # detect and remove outliers by using z-score\r\n # z = np.abs(stats.zscore(data))\r\n # data = data[(z < 3).all(axis=1)]\r\n\r\n # detect and remove outliers by using IQR\r\n for i in outliers:\r\n IQR = data[i].quantile(0.75) - data[i].quantile(0.25)\r\n lower_fence = data[i].quantile(0.25) - (IQR * 1.5)\r\n upper_fence = data[i].quantile(0.75) + (IQR * 1.5)\r\n data[i] = np.where(data[i] < lower_fence, lower_fence, data[i])\r\n data[i] = np.where(data[i] > upper_fence, upper_fence, data[i])\r\n\r\n # one_hot_encoding\r\n # convert to int\r\n # all categorical columns\r\n # data = pd.get_dummies(data, columns=nominal)\r\n\r\n # label_encoder\r\n # covert label to int\r\n # Yes -> 1, No -> 0\r\n le = LabelEncoder()\r\n for i in nominal:\r\n data[i] = le.fit_transform(data[i])\r\n # normalization\r\n\r\n # min-max\r\n scale = MinMaxScaler()\r\n data = pd.DataFrame(scale.fit_transform(data), columns=data.columns)\r\n\r\n # standard\r\n # scale = StandardScaler()\r\n # data = pd.DataFrame(scale.fit_transform(data), columns=data.columns)\r\n\r\n # 
output_data\r\n        data.to_csv(self.output_file, index=False, header=True)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Project+GUI/Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"229288504","text":"##############\n## Script listens to serial port and writes contents into a file\n##############\n## requires pySerial to be installed\nimport serial\n\nimport serial.tools.list_ports\ncomlist = serial.tools.list_ports.comports()\nconnected = []\nfor element in comlist:\n    connected.append(element.device)\n    if element.device.find(\"usbmodem\") != -1:\n        print(element.device.find(\"usbmodem\"))\n        serial_port = element.device\nprint(\"Connected COM ports: \" + str(connected))\n\n\nimport pandas as pd\ndf = pd.DataFrame()\n\n#serial_port = '/dev/cu.usbmodem14111';\nbaud_rate = 31250; #In arduino, Serial.begin(baud_rate)\n#write_to_file_path = \"output.txt\";\n\n#output_file = open(write_to_file_path, \"w+\");\nser = serial.Serial(serial_port, baud_rate)\nwhile True:\n    line = ser.readline();\n    line = line.decode(\"utf-8\") #ser.readline returns a binary, convert to string\n    print(line);\n    #df['test'] = line\n    #print(df.head())\n    ##output_file.write(line);\n    #print(\"\\033c\")","sub_path":"listen_serial.py","file_name":"listen_serial.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"139066570","text":"from multiprocessing import Process,Lock\nimport time\n\n\n'''\nLock: when two processes occupy a resource at the same time, they conflict over it;\nthis can be handled by locking. While one process is using the resource, the lock\nkeeps the processes that come after it from going on.\n'''\nclass MyProcess(Process):\n    def __init__(self, loop,lock):\n        Process.__init__(self)\n        self.loop = loop\n        self.lock = lock\n\n    def run(self):\n        for count in range(self.loop):\n            time.sleep(0.1)\n            self.lock.acquire()  # acquire the lock to prevent conflicts between processes\n            print('Pid:' + str(self.pid) + ' LoopCount ' + str(count))\n            self.lock.release()  # release the lock to prevent conflicts between processes\n\nif __name__ == \"__main__\":\n    lock = Lock()\n    for i in range(10,15):\n        p = MyProcess(i,lock)\n        p.start()\n","sub_path":"多进程基础/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"328927962","text":"#!/usr/bin/env python\nfrom argparse import ArgumentParser\nimport numpy as np\nimport pickle\n\ndef make_band(args):\n\n    if args.log:\n        E = np.logspace(np.log10(args.elow),np.log10(args.ehigh),num=args.npoints)\n    else:\n        E = np.linspace(args.elow,args.ehigh,num=args.npoints)\n    #set A = 1 for now\n    e_break = (args.alpha - args.beta) * args.E0\n    e_ref = 100.0\n    n_alpha = np.power(E/e_ref,args.alpha)*np.exp(-E/args.E0)\n    n_beta = np.power(e_break/e_ref,args.alpha - args.beta) * np.exp(args.beta - args.alpha) * np.power(E/e_ref,args.beta)\n    n = np.concatenate([n_alpha[E < e_break], n_beta[E >= e_break]])\n    \n    if args.showplot:\n        from matplotlib import pylab as py\n        py.plot(E,n,'k+')\n        py.yscale('log')\n        py.xscale('log')\n        py.xlabel('Energy (keV)')\n        py.ylabel('N(E)')\n        py.xlim([E[0],E[-1]])\n        py.show()\n\n    return E,n\n\nif __name__ == '__main__':\n\n    parser = ArgumentParser(description='compute the band function and pickle it')\n    parser.add_argument('alpha',type=float,help='alpha parameter -- low energy PL index')\n    parser.add_argument('beta',type=float,help='beta parameter -- high energy PL index')\n    parser.add_argument('E0',type=float,help='peak energy')\n    
parser.add_argument('--elow',type=float,default=50.0,help='lower energy bound, default is 50 keV')\n parser.add_argument('--ehigh',type=float,default=10000.0,help='upper energy bound, default is 10000 keV')\n parser.add_argument('--npoints',type=int,default=200,help='number of E-axis points')\n parser.add_argument('--showplot',action='store_true',default=False)\n parser.add_argument('--log',action='store_true',default=False)\n args = parser.parse_args()\n\n E,n = make_band(args)\n #fname = 'alpha_%.2f_beta_%.2f_E0_%.2f.pickle' % (args.alpha, args.beta, args.E0)\n fname = 'alpha_%.2f_beta_%.2f_E0_%.2f.dat' % (args.alpha, args.beta, args.E0)\n fout = open(fname,'w')\n fout.write('IP LOGLOG\\n')\n for x in zip(E,n):\n fout.write('DP %.6e %.6e\\n' %x)\n fout.write('EN')\n #pickle.dump({'E':E,'n':n}, fout)\n fout.close()\n\n\n\n\n\n\n","sub_path":"atmosphere/band_function.py","file_name":"band_function.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"324762416","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 5 20:44:39 2020\n\n@author: Brooks Hanson\n\"\"\"\n\n#%%\n#Write a function which will find all such numbers which are divisible by 7 \n#but are not a multiple of 5,between 2000 and 3200 (both included). \n#The numbers obtained should be printed as a list.\n\nlst = []\n\nfor i in range(2000,3201):\n if i % 7 == 0 and i % 5 != 0:\n lst.append(i)\n\nprint(lst)\n#%%\n\n#Write a function which can compute the factorial of a given number.\nnumber = int(input(\"Enter number for factorial calculation: \"))\n\ndef factorial(number):\n if number == 0:\n return 1\n #recursion to get to base case\n else:\n return (number * factorial(number-1))\n\nprint(factorial(number))\n#%%\n#Define a class which has at least two methods: getString: to get a string \n#from console input printString: to print the string in all upper case. \n#Also please include a simple test function to test the class methods.\n\nclass StringStuff:\n def __init__(self):\n self.got = self\n \n def getString(self):\n mySTR = str(input(\"Enter string: \"))\n self.got = mySTR\n \n def printString(self):\n upper = self.got.upper()\n return(upper)\n\ndef test():\n m = StringStuff()\n m.getString()\n print(m.printString())\n\ntest()\n#%%\n#Write a function that accepts a sequence of whitespace separated words as \n#input and prints the words after removing all duplicate words and sorting \n#them alphanumerically. Words should be printed as a list.Suppose the \n#following input is supplied to the functionhello world and practice makes \n#perfect and hello world againThen, the output should be:again and hello makes \n#perfect practice world\n\ndef whitespace():\n s = input('Enter words split by spaces ')\n \n words = [word for word in s.split(\" \")]\n \n st = set(words)\n \n l = list(st)\n \n l.sort()\n \n print(l)\n\nwhitespace()\n#%%\n#Write a function which will find all such numbers between 1000 and 3000 \n#(both included) such that each digit of the number is an even number.\n#The numbers obtained should be printed as a list.\n\ndef evenDigitNums(): \n values = []\n for i in range(1000, 3001):\n s = str(i)\n if (int(s[0])%2==0) and (int(s[1])%2==0) and (int(s[2])%2==0) and (int(s[3])%2==0):\n values.append(s)\n print(values)\n\nevenDigitNums() \n#%%\n#Define a triangle class. Create a constructor and a method that returns \n#the area of the triangle. Also please include a simple test function to \n#test the class methods. 
Area of a triangle = base * height /2\n\nclass Triangle:\n def __init__(self, base, height):\n self.base = base\n self.height = height\n \n def area(self):\n area = self.base * self.height / 2\n return(area)\n\nU = Triangle(3,4)\nstr(U)\ndef testT(b,h):\n m = Triangle(b,h)\n print(m.area())\n\ntestT(3,9)\n\n#%%","sub_path":"Python/Data Structures/FinalPractice.py","file_name":"FinalPractice.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"19122494","text":"from collections import deque\n\nN, x, y = map(int, input().split())\nA = [int(i) for i in input().split()]\n\n# grid = [[False] * (2 * (10 ** 4)) for _ in range(2 * (10 ** 4))]\n#\n# que = deque()\n# que.append((10 ** 4, 10 ** 4))\n# prev_h = 10 ** 4\n# prev_w = 10 ** 4 + A[0]\n# cnt = 1\n# while not que:\n# h, w = que.popleft()\n# if h > 2 * (10 ** 4) or w > 2 * (10 ** 4) or h < 0 or w < 0:\n# continue\n#\n# grid[h][w] = True\n#\n# if prev_w - w == 0:\n# que.append((h, w + A[cnt]))\n# que.append((h, w - A[cnt]))\n# if prev_h - h == 0:\n# que.append((h + A[cnt], w))\n# que.append((h - A[cnt], w))\n# cnt += 1\n#\n# if grid[y][x]:\n# print(\"Yes\")\n# else:\n# print(\"No\")\nM = 10 ** 4\n# x, yは独立に解いて良い\n# nが奇数ならx軸のdp, 偶数ならy軸のdpで解ける\ndp_x = [False] * (2 * M + 1)\ndp_y = [False] * (2 * M + 1)\ndp_x[A[0]] = True\ndp_y[0] = True\n\n\nfor n in range(N):\n next_dp = [False] * (2 * M + 1)\n a = A[n]\n if n % 2 == 0:\n for j in range(-M, M + 1):\n next_dp[j + a] = next_dp[j + a] or dp_y[j]\n next_dp[j] = next_dp[j] or dp_y[j + a]\n next_dp, dp_y = dp_y, next_dp\n else:\n for j in range(-M, M + 1):\n next_dp[j + a] = next_dp[j + a] or dp_x[j]\n next_dp[j] = next_dp[j] or dp_x[j + a]\n next_dp, dp_x = dp_x, next_dp\n\nprint(\"Yes\" if dp_y[y] and dp_x[x] else \"No\")\n","sub_path":"python/ABC251_300/ABC274/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"22452882","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis model unifies the training of decoder, latent encoder, latent predictor\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\nimport time\nimport importlib\nimport torch\nfrom torch import optim\nsys.path.append(\".\")\n\nimport nmtlab\nfrom nmtlab import MTTrainer, MTDataset\nfrom nmtlab.utils import OPTS, Vocab\nfrom nmtlab.schedulers import TransformerScheduler, SimpleScheduler\nfrom nmtlab.utils import is_root_node\nfrom nmtlab.utils.monitor import trains_stop_stdout_monitor, trains_restore_stdout_monitor\nfrom argparse import ArgumentParser\n\nfrom lanmt.lib_latent_encoder import LatentEncodingNetwork\nfrom lib_ebm_lm import EnergyLanguageModel\nfrom datasets import get_dataset_paths\n\nDATA_ROOT = \"./mydata\"\nPRETRAINED_MODEL_MAP = {\n \"wmt14_ende\": \"{}/shu_trained_wmt14_ende.pt\".format(DATA_ROOT),\n \"aspec_jaen\": \"{}/shu_trained_aspec_jaen.pt\".format(DATA_ROOT),\n}\nTRAINING_MAX_TOKENS = 60\n\nap = ArgumentParser()\nap.add_argument(\"--root\", type=str, default=DATA_ROOT)\nap.add_argument(\"--resume\", action=\"store_true\")\nap.add_argument(\"--test\", action=\"store_true\")\nap.add_argument(\"--train\", action=\"store_true\")\nap.add_argument(\"--evaluate\", action=\"store_true\")\nap.add_argument(\"-tb\", \"--tensorboard\", action=\"store_true\")\nap.add_argument(\"--opt_dtok\", default=\"\", type=str, help=\"dataset 
token\")\nap.add_argument(\"--opt_seed\", type=int, default=3, help=\"random seed\")\n\n# Commmon option for both autoregressive and non-autoregressive models\nap.add_argument(\"--opt_batchtokens\", type=int, default=4096)\nap.add_argument(\"--opt_hiddensz\", type=int, default=512)\nap.add_argument(\"--opt_embedsz\", type=int, default=512)\nap.add_argument(\"--opt_heads\", type=int, default=8)\nap.add_argument(\"--opt_longertrain\", action=\"store_true\")\nap.add_argument(\"--opt_x3longertrain\", action=\"store_true\")\nap.add_argument(\"--opt_disentangle\", action=\"store_true\")\n\n# Options for LANMT\nap.add_argument(\"--opt_latentdim\", default=256, type=int, help=\"dimension of latent variables\")\nap.add_argument(\"--opt_distill\", action=\"store_true\", help=\"train with knowledge distillation\")\n\n\n# Paths\nap.add_argument(\"--model_path\",\n default=\"{}/lm.pt\".format(DATA_ROOT))\nap.add_argument(\"--result_path\",\n default=\"{}/lm.result\".format(DATA_ROOT))\nOPTS.parse(ap)\n\nOPTS.fix_bug1 = True\nOPTS.fix_bug2 = False\nOPTS.model_path = OPTS.model_path.replace(DATA_ROOT, OPTS.root)\nOPTS.result_path = OPTS.result_path.replace(DATA_ROOT, OPTS.root)\n\n# Determine the number of GPUs to use\nhorovod_installed = importlib.util.find_spec(\"horovod\") is not None\nif torch.cuda.is_available() and horovod_installed:\n import horovod.torch as hvd\n hvd.init()\n torch.cuda.set_device(hvd.local_rank())\n part_index = hvd.rank()\n part_num = hvd.size()\n gpu_num = hvd.size()\nelse:\n part_index = 0\n part_num = 1\n gpu_num = 1\n\n# Tensorboard Logging\ntb_logdir = None\nOPTS.trains_task = None\nif is_root_node():\n print(\"Running on {} GPUs\".format(gpu_num))\n if OPTS.tensorboard:\n try:\n from trains import Task\n task = Task.init(project_name=\"EBM_LM\", task_name=OPTS.result_tag, auto_connect_arg_parser=False)\n task.connect(ap)\n task.set_random_seed(OPTS.seed)\n OPTS.trains_task = task\n except SystemError as e:\n print(e)\n pass\n tb_logdir = os.path.join(OPTS.root, \"tensorboard\")\n if not os.path.exists(tb_logdir):\n os.mkdir(tb_logdir)\n\n# Get the path variables\n(\n train_src_corpus,\n train_tgt_corpus,\n distilled_tgt_corpus,\n truncate_datapoints,\n test_src_corpus,\n test_tgt_corpus,\n ref_path,\n src_vocab_path,\n tgt_vocab_path,\n n_valid_per_epoch,\n training_warmsteps,\n training_maxsteps,\n pretrained_autoregressive_path\n) = get_dataset_paths(OPTS.root, OPTS.dtok)\n\nif OPTS.longertrain:\n training_maxsteps = int(training_maxsteps * 1.5)\nif OPTS.x3longertrain:\n training_maxsteps = int(training_maxsteps * 3)\n\nif nmtlab.__version__ < \"0.7.0\":\n print(\"lanmt now requires nmtlab >= 0.7.0\")\n print(\"Update by pip install -U nmtlab\")\n sys.exit()\n\n# Define dataset\nif OPTS.distill:\n tgt_corpus = distilled_tgt_corpus\nelse:\n tgt_corpus = train_tgt_corpus\n\n\nn_valid_samples = 5000 if OPTS.finetune else 500\nif OPTS.train:\n dataset = MTDataset(\n src_corpus=train_src_corpus, tgt_corpus=tgt_corpus,\n src_vocab=src_vocab_path, tgt_vocab=tgt_vocab_path,\n batch_size=OPTS.batchtokens * gpu_num, batch_type=\"token\",\n truncate=truncate_datapoints, max_length=TRAINING_MAX_TOKENS,\n n_valid_samples=n_valid_samples)\nelse:\n dataset = None\n\n# Create the model\nbasic_options = dict(\n dataset=dataset,\n src_vocab_size=Vocab(src_vocab_path).size(),\n tgt_vocab_size=Vocab(tgt_vocab_path).size(),\n hidden_size=OPTS.hiddensz, embed_size=OPTS.embedsz,\n n_att_heads=OPTS.heads, shard_size=OPTS.shard,\n seed=OPTS.seed\n)\n\n# lanmt_options = basic_options.copy()\n# 
lanmt_options.update(dict(\n# latent_dim=OPTS.latentdim,\n# KL_budget=0. if OPTS.finetune else OPTS.klbudget,\n# max_train_steps=training_maxsteps,\n# ))\n#\n# vae = LatentEncodingNetwork(**lanmt_options)\n# vae_path = \"{}/data/wmt14_ende_fair/lacoder_batchtokens-8192_distill_dtok-wmt14_fair_ende_klbudget-15.0_latentdim-{}.pt\".format(os.getenv(\"HOME\"), OPTS.latentdim)\n# if OPTS.disentangle:\n# vae_path = vae_path.replace(\"_distill\", \"_disentangle_distill\")\n# print(\"loading\", vae_path)\n# assert os.path.exists(vae_path)\n# vae.load(vae_path)\n# if torch.cuda.is_available():\n# vae.cuda()\n\nnmt = EnergyLanguageModel(latent_size=OPTS.latentdim)\n\n\n# Training\nif OPTS.train or OPTS.all:\n # Training code\n scheduler = SimpleScheduler(max_epoch=20)\n # scheduler = TransformerScheduler(warm_steps=training_warmsteps, max_steps=training_maxsteps)\n optimizer = optim.Adam(nmt.parameters(), lr=0.0001, betas=(0.9, 0.98), eps=1e-4)\n\n trainer = MTTrainer(\n nmt, dataset, optimizer,\n scheduler=scheduler, multigpu=gpu_num > 1,\n using_horovod=horovod_installed)\n OPTS.trainer = trainer\n trainer.configure(\n save_path=OPTS.model_path,\n n_valid_per_epoch=n_valid_per_epoch,\n criteria=\"loss\",\n tensorboard_logdir=tb_logdir,\n # clip_norm=0.1 if OPTS.scorenet else 0\n )\n if OPTS.resume:\n trainer.load()\n trains_stop_stdout_monitor()\n trainer.run()\n trains_restore_stdout_monitor()\n\n# Translation\nif OPTS.test or OPTS.all:\n # Translate using only one GPU\n if not is_root_node():\n sys.exit()\n torch.manual_seed(OPTS.seed)\n model_path = OPTS.model_path\n if not os.path.exists(model_path):\n print(\"Cannot find model in {}\".format(model_path))\n sys.exit()\n nmt.load(model_path)\n if torch.cuda.is_available():\n nmt.cuda()\n nmt.train(False)\n src_vocab = Vocab(src_vocab_path)\n tgt_vocab = Vocab(tgt_vocab_path)\n\n # Testing for langauge model\n lines = open(test_tgt_corpus).readlines()\n first_line = lines[0]\n first_line = \"Gut@@ ach : Noch ach Sicherheit ach Fußgän@@ ger .\"\n # first_line = \"ach ach .\"\n print(first_line)\n first_line_tokens = tgt_vocab.encode(\" {} \".format(first_line.strip()).split())\n input = torch.tensor([first_line_tokens])\n if torch.cuda.is_available():\n input = input.cuda()\n # z = vae.compute_codes(input)\n z = nmt.compute_prior_states(input)\n # z = torch.zeros((1, 6, OPTS.latentdim))\n mask = torch.ones((1, z.shape[1]))\n if torch.cuda.is_available():\n mask = mask.cuda()\n z = z.cuda()\n init_z = z.clone()\n for _ in range(10):\n z, tokens = nmt.refine(z, mask, n_steps=1, step_size=0.5, return_tokens=True)\n # z[:, 0] = init_z[:, 0]\n # z[:, -1] = init_z[:, -1]\n line = tgt_vocab.decode(tokens[0])\n print(\" \".join(line))\n raise SystemExit\n\n\n result_path = OPTS.result_path\n # Read data\n lines = open(test_tgt_corpus).readlines()\n trains_stop_stdout_monitor()\n with open(OPTS.result_path, \"w\") as outf:\n for i, line in enumerate(lines):\n # Make a batch\n tokens = tgt_vocab.encode(\" {} \".format(line.strip()).split())\n x = torch.tensor([tokens])\n if torch.cuda.is_available():\n x = x.cuda()\n mask = torch.ne(x, 0).float()\n # Compute codes\n codes = nmt.compute_codes(x)\n tokens = nmt.compute_tokens(codes, mask)\n # Predict latent and target words from prior\n target_tokens = tokens.cpu().numpy()[0].tolist()\n # Convert token IDs back to words\n target_tokens = [t for t in target_tokens if t > 2]\n target_words = tgt_vocab.decode(target_tokens)\n target_sent = \" \".join(target_words)\n import pdb;pdb.set_trace()\n 
outf.write(target_sent + \"\\n\")\n sys.stdout.write(\"\\rtranslating: {:.1f}% \".format(float(i) * 100 / len(lines)))\n sys.stdout.flush()\n sys.stdout.write(\"\\n\")\n trains_restore_stdout_monitor()\n\n\n","sub_path":"archive/lm.bak.py","file_name":"lm.bak.py","file_ext":"py","file_size_in_byte":9192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"602573841","text":"# -*- coding: utf-8 -*-\n\"\"\"bourseLibre URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog__ import urls as blog_urls\n 2. Add a URL to urlpatterns: url(r'^blog__/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.urls import path\nfrom . import views\nfrom django.views.generic import TemplateView\n#from fcm_django.api.rest_framework import FCMDeviceAuthorizedViewSet\n\n# On import les vues de Django, avec un nom spécifique\nfrom django.contrib.auth.decorators import login_required\n\n# admin.autodiscover()\nfrom django.contrib import admin\n\nfrom wiki import urls\n\nadmin.sites.site_header =\"Admin \"\nadmin.sites.site_title =\"Admin Permacat\"\n\n\nurlpatterns = [\n url(r'^tinymce/', include('tinymce.urls')),\n url(r'^summernote/', include('django_summernote.urls')),\n url(r'^captcha/', include('bourseLibre.captcha_local.urls')),\n url('^', include('django.contrib.auth.urls')),\n url(r'^$', views.bienvenue, name='bienvenue'),\n url(r'^bienvenue/$', views.bienvenue, name='bienvenue'),\n url(r'^faq/$', views.faq, name='faq'),\n url(r'^gallerie/$', views.gallerie, name='gallerie'),\n url(r'^permacat/admin/$', views.admin_asso, name='admin_asso'),\n url(r'^RTG/admin/$', views.admin_asso_rtg, name='admin_asso_rtg'),\n url(r'^permacat/fichiers/$', views.telechargements_asso, name='telechargements_asso'),\n url(r'^permacat/adhesion_asso/$', views.adhesion_asso, name='adhesion_asso'),\n url(r'^notifications/$', views.notifications, name='notifications'),\n url(r'^notificationsParDate/$', views.notificationsParDate, name='notificationsParDate'),\n url(r'^dernieresInfos/$', views.dernieresInfos, name='dernieresInfos'),\n url(r'^prochaines_rencontres/$', views.prochaines_rencontres, name='prochaines_rencontres'),\n url(r'^permacat/presentation/$', views.presentation_asso, name='presentation_asso'),\n url(r'^site/presentation/$', views.presentation_site, name='presentation_site'),\n url(r'^permacat/statuts/$', views.statuts, name='statuts'),\n \n url(r'^gestion/', admin.site.urls, name='admin',),\n url(r'^merci/$', views.merci, name='merci'),\n url(r'^forum/', include('blog.urls', namespace='bourseLibre.blog')),\n url(r'^kit/', include('fiches.urls', namespace='bourseLibre.fiches')),\n url(r'^ateliers/', include('ateliers.urls', namespace='bourseLibre.ateliers')),\n # url(r'^search/', include('haystack.urls'), name='chercher_site'),\n #url(r'^search/', include('haystack.urls'), name='haystack_search'),\n url(r'^chercher/produit/$', login_required(views.chercher), name='chercher'),\n url(r'^accounts/profil/(?P[0-9]+)/$', login_required(views.profil), name='profil',),\n 
url(r'^accounts/profil/(?P[-\\w.]+)/$', login_required(views.profil_nom), name='profil_nom',),\n url(r'^accounts/profile/$', login_required(views.profil_courant), name='profil_courant',),\n url(r'^accounts/profil_inconnu/$', views.profil_inconnu, name='profil_inconnu',),\n url(r'^accounts/profil_modifier/$', login_required(views.profil_modifier.as_view()), name='profil_modifier',),\n url(r'^accounts/profil_supprimer/$', login_required(views.profil_supprimer.as_view()), name='profil_supprimer',),\n url(r'^accounts/profil_modifier_adresse/$', login_required(views.profil_modifier_adresse.as_view()), name='profil_modifier_adresse',),\n url(r'^accounts/profil_contact/(?P[0-9]+)/$', login_required(views.profil_contact), name='profil_contact',),\n url(r'^accounts/mesSuivis$', login_required(views.mesSuivis), name='mesSuivis',),\n url(r'^register/$', views.register, name='senregistrer',),\n #url(r'^password/reset/$', views.reset_password, name='reset_password'),\n url(r'^password/change/$', views.change_password, name='change_password'),\n path('auth/', include('django.contrib.auth.urls')),\n\n url(r'^contact_admins/$', views.contact_admins, name='contact_admins',),\n url(r'^charte/$', views.charte, name='charte',),\n url(r'^cgu/$', views.cgu, name='cgu',),\n url(r'^liens/$', views.liens, name='liens',),\n url(r'^fairedon/$', views.fairedon, name='fairedon',),\n #url(r'^agenda/$', views.agenda, name='agenda',),\n url(r'^cooperateurs/annuaire/$', login_required(views.annuaire), name='annuaire',),\n url(r'^cooperateurs/listeContacts/$', login_required(views.listeContacts), name='listeContacts',),\n url(r'^cooperateurs/listeContacts_rtg/$', login_required(views.listeContacts_rtg), name='listeContacts_rtg',),\n url(r'^cooperateurs/listeFollowers/$', login_required(views.listeFollowers), name='listeFollowers',),\n url(r'^cooperateurs/annuaire_permacat/$', login_required(views.annuaire_permacat), name='annuaire_permacat',),\n url(r'^cooperateurs/annuaire_rtg/$', login_required(views.annuaire_rtg), name='annuaire_rtg',),\n url(r'^cooperateurs/carte/$', login_required(views.carte), name='carte',),\n url(r'^cooperateurs/carte_permacat/$', login_required(views.carte_permacat), name='carte_permacat',),\n url(r'^cooperateurs/carte_rtg/$', login_required(views.carte_rtg), name='carte_rtg',),\n\n url(r'^marche/proposer/(?P[-A-Za-z]+)/$', login_required(views.produit_proposer), name='produit_proposer', ),\n url(r'^marche/proposer/', login_required(views.proposerProduit_entree), name='produit_proposer_entree',),\n\n # url(r'^list$', views.product_list),\n # url(r'^list2/$', FilterView.as_view(model=Produit, filterset_class=ProductFilter,)),\n url(r'^marche/$', login_required(views.ListeProduit.as_view()), name=\"marche\"),\n url(r'^marche/lister/$', login_required(views.ListeProduit.as_view()), name=\"marche\"),\n url(r'^marche/supprimerProduits_expires_confirmation/$', views.supprimerProduits_expires_confirmation, name=\"supprimerProduits_expires_confirmation\"),\n url(r'^marche/supprimerProduits_expires/$', views.supprimerProduits_expires, name=\"supprimerProduits_expires\"),\n url(r'^marche/lister_offres/', login_required(views.ListeProduit_offres.as_view()),\n name=\"marche_offres\"),\n url(r'^marche/lister_recherches/', login_required(views.ListeProduit_recherches.as_view()),\n name=\"marche_recherches\"),\n\n url(r'^marche/detail/(?P[0-9]+)/$', login_required(views.detailProduit), name='produit_detail',),\n\n url(r'^marche/modifier/(?P[0-9]+)/$',\n login_required(views.ProduitModifier.as_view()), 
name='produit_modifier', ),\n url(r'^marche/contacterProducteur/(?P[0-9]+)/$',\n login_required(views.produitContacterProducteur), name='produit_contacterProducteur', ),\n url(r'^marche/supprimer/(?P[0-9]+)/$',\n login_required(views.ProduitSupprimer.as_view()), name='produit_supprimer', ),\n\n url(r'^panier/afficher/$', login_required(views.afficher_panier), name='panier_afficher', ),\n\n url(r'^panier/ajouter/(?P[0-9]+)/(?P[0-9]{1,3}([.]{0,1}[0-9]{0,3}))/$',\n login_required(views.ajouterAuPanier), name='produit_ajouterAuPanier', ),\n\n url(r'^panier/supprimerItem/(?P[0-9]+)',\n login_required(views.enlever_du_panier), name='supprimerDuPanier', ),\n\n url(r'^requetes/afficher/$',\n login_required(views.afficher_requetes), name='afficher_requetes', ),\n\n url(r'^conversations/(?P[-\\w.]+)$', login_required(views.lireConversation), name='lireConversation'),\n url(r'^conversations/(?P[-\\w.]+)/(?P[-\\w.]+)$', login_required(views.lireConversation_2noms), name='lireConversation_2noms'),\n url(r'^conversations/$', login_required(views.ListeConversations.as_view()), name='conversations'),\n url(r'^conversations/chercher/$', login_required(views.chercherConversation), name='chercher_conversation'),\n url(r'^suivre_conversation/$', views.suivre_conversations, name='suivre_conversations'),\n url(r'^suivre_produits/$', views.suivre_produits, name='suivre_produits'),\n\n url(r'^agora/$', login_required(views.agora), name='agora'),\n url(r'^agora_permacat/$', login_required(views.agora_permacat), name='agora_permacat'),\n url(r'^agora_rtg/$', login_required(views.agora_rtg), name='agora_rtg'),\n\n url(r'^activity/', include('actstream.urls')),\n\n path(r'wiki_ecovillage_notifications/', include('django_nyt.urls')),\n path(r'wiki_ecovillage/', include('wiki.urls'))\n]\nurlpatterns += [\n url(r'^robots\\.txt$', TemplateView.as_view(template_name=\"bourseLibre/robots.txt\", content_type='text/plain')),\n]\n\nfrom django.conf import settings\nif settings.DEBUG:\n from django.conf.urls.static import static\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nhandler404 = 'bourseLibre.views.handler404'\nhandler500 = 'bourseLibre.views.handler500'\nhandler400 = 'bourseLibre.views.handler400'\nhandler403 = 'bourseLibre.views.handler403'\n\nif settings.LOCALL:\n import debug_toolbar\n urlpatterns = [url(r'^__debug__/', include(debug_toolbar.urls)),] + urlpatterns\n #urlpatterns += url('',(r'^media/(?P.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}))\n\n","sub_path":"bourseLibre/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":9509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"196876180","text":"from django.shortcuts import render, HttpResponse\nfrom xml.etree import ElementTree as ET\nimport hashlib, time\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .config import TOKEN, mp\n\n# Create your views here.\n@csrf_exempt\ndef validate(request):\n if request.method == 'POST':\n xml = ET.fromstring(request.body)\n return render(request,'mp/yb_reply_text.xml', {\n 'FromUserName': xml.find('ToUserName').text,\n 'ToUserName': xml.find('FromUserName').text,\n 'CreateTime': str(int(time.time()))\n })\n if request.method == 'GET':\n tmpArr = [\n TOKEN,\n request.GET['timestamp'],\n request.GET['nonce'],\n ]\n tmpArr.sort()\n temp = hashlib.sha1(''.join(tmpArr).encode('utf-8')).hexdigest()\n if temp == request.GET['signature']:\n return HttpResponse(request.GET['echostr'])\n 
return HttpResponse('')\n\n# @login_required(login_url='/yiban/before_bind?op=yiban')","sub_path":"Django/root/mp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488207236","text":"from pymongo import MongoClient\n\ncliente = MongoClient('mongodb://localhost:27017')\n\n#estudiante = {'nombre': 'Eduardo', 'apellido': 'Sanz'}\n#Insert a single document\n#cliente.universidad.alumnos.insert_one(estudiante)\n\nbd = cliente['universidad']\ncoleccion = bd['alumnos']\n#Insert several documents at once\nestudiantes = [{'nombre': 'Ramón', 'apellido': 'Ayala'},\n               {'nombre': 'Peter', 'apellido': 'Capusotto'},\n               {'nombre': 'Alfredo', 'apellido': 'Nadie',\n                'hijos':[{'nombre': 'Jacinto', 'edad': 10},\n                         {'nombre': 'Jimena', 'edad': 7}]}\n               ]\n\ncoleccion.insert_many(estudiantes)\nprint(\"Data uploaded\")","sub_path":"Clase 10/insert_mongo.py","file_name":"insert_mongo.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"539918505","text":"#imports\nimport random\nimport sys\n\n\n\n#text walls\nintro = \"Before the journey begins, your soul must reincarnate into the husk of a Bounded Knight, Assassin, or Forbidden Traveler\"\n\ndef exit_fun(): \n    sys.exit(\"You have lost this life, but your soul is hungry for resurrection.\")\n\n\n#Class for Character Selection, defines attack, defense, magic\nclass character_class:\n    def __init__(self,atk,df,fate):\n        self.atk = atk\n        self.df = df\n        self.fate = fate\n\n#Class Choices\nBounded_Knight = character_class(50,40,10) \nAssassin = character_class(60,25,15)\nForbidden_Traveler = character_class(20,20,60)\n\n#Difficulty Modifiers\neasy = 0.8\nnormal = 1\nhard = 1.2\n\n#Starting Vars\ngold = 50\ninventory = []\n\n\n\n#difficulty selection\nwhile True:\n    #choose from easy, normal, hard\n    diff = input(\"Choose Your Difficulty: \")\n    if diff == \"easy\":\n        current_diff = easy\n        break\n    elif diff == \"normal\":\n        current_diff = normal\n        break\n    elif diff == \"hard\":\n        current_diff = hard\n        break\n    else:\n        print(\"Not a valid input, please try again\")\n\n\n\nprint (intro)\n\n#Class Selection\nwhile True:\n    diff = input(\"Choose Your Class: \")\n    if diff == \"Bounded Knight\":\n        current_class = Bounded_Knight\n        break\n    elif diff == \"Assassin\":\n        current_class = Assassin\n        break\n    elif diff == \"Forbidden Traveler\":\n        current_class = Forbidden_Traveler\n        break\n    else:\n        print(\"Not a valid input, please try again\")\n\n\nprint(\"A man approaches you with a rock in his right hand. He seems hungry.\")\n\n#choose \"attack\" or \"offer gold\"\nchoice_1 = input(\"Do you attack him, or offer him a few gold? \") \n\n#First attack encounter; class descriptions, no reliance on attack or defense stats or difficulty modifiers\nif choice_1 == \"attack\":\n    if current_class == Bounded_Knight:\n        print(\"You unsheathe your large sword and swing horizontally in a large swooping motion. The length of the blade cuts the man in two before he even had a chance to enter arms length of you\") \n    elif current_class == Assassin:\n        print(\"You sprint up to the man, pull out the knife hidden in your sleeve, and cut his jugular before he even has a chance to react\")\n    elif current_class == Forbidden_Traveler:\n        if Forbidden_Traveler.fate > random.randint(40,70):\n            print(\"You begin to whisper the unholy incantations of your lost tribe. 
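The difficulty and class prompts above repeat one prompt-validate-break shape. A small helper, my generalization rather than part of the original script, would collapse both loops into single calls:

```python
def choose(prompt, options):
    """Re-prompt until the answer matches a key in `options`, then return its value."""
    while True:
        answer = input(prompt)
        if answer in options:
            return options[answer]
        print("Not a valid input, please try again")

# Hypothetical usage mirroring the two loops above:
# current_diff = choose("Choose Your Difficulty: ", {"easy": 0.8, "normal": 1, "hard": 1.2})
# current_class = choose("Choose Your Class: ", {"Bounded Knight": Bounded_Knight, ...})
```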
Suddenly, the man is struck down by lightning.\")\n else:\n print(\"You mutter to the Lost Gods, but the man begins to pummel your head in with a rock. It seems the Gods have forsaken you for today.\")\n exit_fun()\n\nif choice_1 == \"offer gold\":\n gold -= 5\n print(f\"You throw five gold pieces near his feet. The man eagerly kneels to gather them as you walk away. You have {gold} remaining gold left. \")\n\n\n\n\n \n\n","sub_path":"pass.py","file_name":"pass.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546239768","text":"import bcrypt\nimport datetime\nfrom bson.objectid import ObjectId\nfrom flask import jsonify, request\nfrom functools import wraps\nfrom pymongo import MongoClient\nfrom bson.json_util import dumps, loads\n\nfrom ...common.response import getResponse\nfrom ...db import db, encodeToken, decodeToken\n\norders = db['orders']\n\n\ndef initOrdersRoutes(app):\n @app.route('/api/orders/get/all', methods=['POST'])\n def orders_get():\n return jsonify(getAllActiveOrders())\n\n @app.route('/api/orders/remove', methods=['POST'])\n def orders_remove():\n orderId = request.form.get('orderId')\n\n if not orderId:\n return jsonify(getResponse(False, \"missing parameters in the request\", \"missingParams\"))\n\n return jsonify(deactivateOrder(orderId))\n\n\n# ================ HELPERS ==================\n\ndef saveOrder(order, userPublic):\n newOrder = {\"name\": userPublic[\"name\"],\n \"phone\": userPublic[\"phone\"],\n \"address\": userPublic[\"address\"],\n \"order\": order,\n \"created\": datetime.datetime.utcnow(),\n \"isActive\": True}\n\n order_id = orders.insert_one(newOrder).inserted_id\n\n if order_id == None:\n return getResponse(False, \"system error\", \"systemError\")\n else:\n res = getResponse(True, \"success creating a new order\", data={\"orderId\": order_id})\n res[\"authToken\"] = encodeToken(str(order_id))\n return res\n\n\ndef getAllActiveOrders():\n # find last 100 active orders\n m_orders = list(orders.find({\"isActive\": True}).sort(\"created\", 1).limit(100))\n\n # serialize ObjectId\n for item in m_orders:\n item[\"_id\"] = str(item[\"_id\"])\n\n if not m_orders:\n return getResponse(False, \"no orders\", \"noOrders\")\n return getResponse(True, \"found orders\", data={\"orders\": m_orders})\n\n\ndef deactivateOrder(orderId):\n id = ObjectId(orderId)\n\n print(orders.find_one({'_id': id, \"isActive\": True}))\n res = orders.update({'_id': id}, {\"$set\": {\"isActive\": False}}, upsert=False)\n print(orders.find_one({'_id': id, \"isActive\": True}))\n return getResponse(True, \"order deleted\", data=res)\n","sub_path":"src/app/routes/orders/orders.py","file_name":"orders.py","file_ext":"py","file_size_in_byte":2138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"265719891","text":"import os, logging\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nDEBUG = os.getenv(\"ENVIRONEMENT\") == \"DEV\"\nHOST = os.getenv('HOST', '0.0.0.0')\nPORT = int(os.getenv('PORT', '5000'))\n\nSECRET_KEY = os.getenv('SECRET_KEY', 't1NP63m4wnBg6nyHYKfmc2TpCOGI4nss')\nFRONT_URL = os.environ.get(\"FRONT_URL\", 'http://127.0.0.1:8080/')\n\nAUTHMACHINE_URL = os.environ['AUTHMACHINE_URL']\nAUTHMACHINE_CLIENT_ID = os.environ['AUTHMACHINE_CLIENT_ID']\nAUTHMACHINE_CLIENT_SECRET = os.environ['AUTHMACHINE_CLIENT_SECRET']\nAUTHMACHINE_API_TOKEN = os.environ.get('AUTHMACHINE_API_TOKEN')\nAUTHMACHINE_SCOPE = 'openid email 
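One caveat in the orders module above: deactivateOrder() calls Collection.update(), which was deprecated for years and removed in PyMongo 4, and it compares with `order_id == None` rather than `is None`. A sketch of the same soft delete against the current API (connection string and collection names are placeholders):

```python
from bson.objectid import ObjectId
from pymongo import MongoClient

orders = MongoClient('mongodb://localhost:27017')['shop']['orders']

def deactivate_order(order_id):
    # update_one() replaces the removed update(); upsert defaults to False.
    result = orders.update_one({'_id': ObjectId(order_id)},
                               {'$set': {'isActive': False}})
    return result.modified_count == 1
```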
profile'\n\n\nlogging.basicConfig(\n filename=os.getenv('SERVICE_LOG', 'server.log'),\n level=logging.DEBUG,\n format='%(levelname)s: %(asctime)s pid:%(process)s module:%(module)s %(message)s',\n datefmt='%d/%m/%y %H:%M:%S',\n)\n\n\n","sub_path":"backend/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648208180","text":"import platform\nfrom scripts.azure_utils import setContext\nfrom azureml.core.image import ContainerImage\nfrom scripts.azure_utils import *\n\nclass BaseContext:\n\n '''\n Contains base context items\n '''\n def __init__(self, programArgs, userAuthorization):\n self.programArguments = programArgs\n self.authentication = userAuthorization\n self.platform = platform.system().lower()\n self.workspace = None\n self.experiment = None\n self.model = None\n\n if not self.authentication:\n raise Exception(\"Authentication object missing\")\n\n '''\n Change the context to the provided subscription id\n This expects that an az login has already occured with a user\n that has the correct credentials.\n '''\n setContext(self.programArguments.subid)\n\n\n def generateWorkspace(self):\n '''\n Gets an existing workspace (by name) or creates a new one\n '''\n \n self.workspace = getWorkspace(\n self.authentication, \n self.programArguments.subid, \n self.programArguments.resourceGroup,\n self.programArguments.workspace,\n self.programArguments.region\n )\n\n if not self.workspace:\n raise Exception(\"Workspace Creation Failed\")\n\n def generateExperiment(self):\n '''\n Get an existing experiment by name, or create new\n '''\n self.experiment = getExperiment(self.workspace, self.programArguments.experiment)\n\n if not self.experiment:\n raise Exception(\"Experiment Creation Failed\")\n\n\n","sub_path":"contexts/basecontext.py","file_name":"basecontext.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"248466043","text":"import numpy as np\nimport matplotlib.pyplot as plt\nx=np.array([15,12,8,8,7,7,7,6,5,3])\ny=np.array([10,25,17,11,13,17,20,13,9,15])\nplt.scatter(x,y,color=\"m\",marker=\"o\",s = 50)\nn=np.size(x)\nxm=np.mean(x)\nym=np.mean(y)\ns1=0\nfor i in range(n):\n\ts1=s1+(y[i]*x[i]-ym*x[i])\ns2=0\nfor i in range(n):\n\ts2=s2+(x[i]*x[i]-xm*x[i])\na=s1/s2\nb=(ym-(a*xm))\nx=5\ny=a*x+b\nprint(\"History=%.1f\" % y)\n\n\t\n","sub_path":"linear_03.py","file_name":"linear_03.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"415769743","text":"\"\"\" Controls for the registry service \"\"\"\nfrom click import echo\n\nfrom bwdt.constants import KOLLA_IMAGE_TAGS, SERVICE_IMAGE_TAGS\nfrom bwdt.lib.container import Docker\n\n\ndef start(ip='0.0.0.0', port=5000):\n \"\"\" Start the registry container \"\"\"\n name = 'registry'\n repo = 'registry'\n tag = SERVICE_IMAGE_TAGS[repo]\n http_addr = \"{}:{}\".format(ip, port)\n image = '{}:{}'.format(repo, tag)\n docker_kwargs = {\n 'environment': {'REGISTRY_HTTP_ADDR': http_addr},\n 'ports': {port: port}\n }\n docker = Docker()\n docker.pull(repository=repo, tag=tag)\n success = docker.run(image, name=name, **docker_kwargs)\n return success\n\n\ndef sync_image(registry_url, image, tag=None):\n \"\"\" Pull images from upstream or import from media, push to registry \"\"\"\n if tag is None:\n tag = KOLLA_IMAGE_TAGS[image]\n 
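sync_image() above goes through bwdt's Docker wrapper, whose internals are not shown in this file. Roughly the same pull, retag, push flow written directly against the docker SDK (docker-py), assuming an unauthenticated registry at registry_url; a sketch, not the wrapper's actual implementation:

```python
import docker

def sync_image_sketch(registry_url, image, tag):
    client = docker.from_env()
    pulled = client.images.pull(image, tag=tag)    # e.g. an upstream Kolla image
    target = '{}/{}'.format(registry_url, image)   # registry-prefixed repository name
    pulled.tag(target, tag=tag)                    # the 'retag' step
    client.images.push(target, tag=tag)            # push to the private registry
```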
docker = Docker()\n    docker.pull(image, tag)\n    echo('> Applying new tag')\n    docker.retag(image, tag, registry_url)\n    echo('> Pushing {}:{} to {}'.format(image, tag, registry_url))\n    docker.push(image, tag, registry_url)\n\n\ndef sync_all_images(registry_url, tag=None):\n    \"\"\" Sync all images to registry_url \"\"\"\n    i = 0\n    length = len(KOLLA_IMAGE_TAGS)\n    for image in KOLLA_IMAGE_TAGS:\n        echo('Progress: {} / {}'.format(i, length))\n        sync_image(registry_url, image, tag)\n        i += 1\n","sub_path":"bwdt/services/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"175732186","text":"import pygame, random, math\n\ndef addVectors(firstAngle, firstLength, secondAngle, secondLength):\n\n\txPosition = math.sin(firstAngle) * firstLength + math.sin(secondAngle) * secondLength\n\tyPosition = math.cos(firstAngle) * firstLength + math.cos(secondAngle) * secondLength\n\n\tobjectAngle = 0.5 * math.pi - math.atan2(yPosition, xPosition)\n\tobjectLength = math.hypot(xPosition, yPosition)\n\n\treturn objectAngle, objectLength\n\n#Collision of objects\ndef collide(firstObject, secondObject):\n\telasticity = 0.9\n\n\t#Vector coordinates\n\tvectorX = firstObject.x - secondObject.x\n\tvectorY = firstObject.y - secondObject.y\n\n\tdistanceBetween = math.hypot(vectorX, vectorY)\n\tif distanceBetween <= firstObject.radius + secondObject.radius:\n\n\t\t#Object trajectory\n\t\tobjectTangent = math.atan2(vectorY, vectorX)\n\t\tobjectAngle = 0.5 * math.pi + objectTangent\n\n\t\t#Total weight of two objects\n\t\tobjectTotalWeight = firstObject.weight + secondObject.weight\n\n\n\n\t\t#Angle and velocity after collision\n\t\tfirstObject.angle, firstObject.speed = addVectors(firstObject.angle, firstObject.speed * (firstObject.weight - secondObject.weight)\\\n\t\t\t / objectTotalWeight, objectAngle, 2 * secondObject.speed * secondObject.weight / objectTotalWeight)\n\n\t\tsecondObject.angle, secondObject.speed = addVectors(secondObject.angle, secondObject.speed * (secondObject.weight - firstObject.weight)\\\n\t\t\t / objectTotalWeight, objectAngle + math.pi, 2 * firstObject.speed * firstObject.weight / objectTotalWeight)\n\n\t\tfirstObject.speed *= elasticity\n\t\tsecondObject.speed *= elasticity\n\n\t\t#Interaction of two objects\n\t\tobjectOverlap = 0.5 * (firstObject.radius + secondObject.radius - distanceBetween + 1)\n\n\t\t#Position change on screen\n\t\tfirstObject.x += math.sin(objectAngle) * objectOverlap\n\t\tfirstObject.y -= math.cos(objectAngle) * objectOverlap\n\n\t\tsecondObject.x -= math.sin(objectAngle) * objectOverlap\n\t\tsecondObject.y += math.cos(objectAngle) * objectOverlap\n\nclass BallObject:\n\n\t#Constructor\n\tdef __init__(self, x, y, radius, colour):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.radius = radius\n\t\tself.colour = colour\n\t\tself.weight = radius * (256 - colour)\n\t\tself.border = 0\n\t\tself.speed = 0\n\t\tself.angle = 0\n\n\t#Draw object on the screen\n\tdef drawObject(self):\n\t\tpygame.draw.circle(screen, (self.colour, 0, self.colour), (int(self.x), int(self.y)), self.radius, self.border)\n\n\t#The movement of object\n\tdef moveObject(self):\n\t\tself.x += math.sin(self.angle) * self.speed\n\t\tself.y -= math.cos(self.angle) * self.speed\n\n\t#Check the boundaries of screen\n\tdef bounceOfObject(self):\n\t\tif self.x > width - self.radius:\n\t\t\tself.x = 2 * (width - self.radius) - self.x\n\t\t\tself.angle = - self.angle\n\n\t\telif self.x < 
self.radius:\n\t\t\tself.x = 2 * self.radius - self.x\n\t\t\tself.angle = - self.angle\n\n\t\tif self.y > height - self.radius:\n\t\t\tself.y = 2 * (height - self.radius) - self.y\n\t\t\tself.angle = math.pi - self.angle\n\n\t\telif self.y < self.radius:\n\t\t\tself.y = 2 * self.radius - self.y\n\t\t\tself.angle = math.pi - self.angle\n\n\t#Display object\n\tdef showObject(self):\n\t\tself.moveObject()\n\t\tself.bounceOfObject()\n\t\tself.drawObject()\n\ndef main():\n\n\t#Some global parameters\n\tglobal width, height, screen\n\twidth, height = 640, 480\n\tbackgroundColour = (255, 255, 255)\n\n\tscreen = pygame.display.set_mode((width, height))\n\tpygame.display.set_caption('Objects collision model')\n\n\t#Amount of objects\n\tnumberOfObjects = random.randint(10, 30)\n\t#numberOfObjects = 100\n\tobjects = []\n\n\tfor i in range(numberOfObjects):\n\n\t\t#Generating object radius and weight\n\t\tradius = random.randint(10, 20)\n\t\tcolour = random.randint(100, 255)\n\n\t\t#Start position of object\n\t\tx = random.randint(radius, width - radius)\n\t\ty = random.randint(radius, height - radius)\n\n\t\t#Defining object\n\t\tball = BallObject(x, y, radius, colour)\n\t\tball.speed = random.random()\n\t\tball.angle = random.uniform(0, math.pi*2)\n\n\t\tobjects.append(ball)\n\n\t#Infinite loop\n\tdone = False\n\twhile not done:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tdone = True\n\n\t\tscreen.fill(backgroundColour)\n\n\t\t#Magic happens here\n\t\tfor i, mainBall in enumerate(objects):\n\t\t\tfor otherBall in objects[i+1:]:\n\t\t\t\tcollide(mainBall, otherBall)\n\t\t\tmainBall.showObject()\n\t\tpygame.display.flip()\n\nif __name__ == '__main__':\n\tmain()","sub_path":"PhysicsModeling/PhysicalCollusion.py","file_name":"PhysicalCollusion.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"544556471","text":"from flask import Flask, render_template, request, url_for, redirect, abort\nfrom tools import jsonify\nfrom flask_pymongo import PyMongo\n\napp = Flask(__name__)\napp.config['MONGO_DBNAME'] = 'verdinha'\n#app.config[\"SECRET_KEY\"] = \"KeepThisS3cr3t\"\nmongo = PyMongo(app)\n\n@app.route(\"/\")\ndef index():\n\treturn render_template('front.html')\n\n@app.route(\"/busca/<nome>\")\ndef busca(nome):\n\tresultado = mongo.db.doacoes.find_one({\"nome\": nome})\n\tif resultado:\n\t\treturn jsonify(resultado)\n\telse:\n\t\tabort(404)\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True)","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"548212487","text":"\"\"\"\nCheck the correctness of brackets in a given string.\nInput: source code of some program.\nOutput: index of the first error or 'Success' string otherwise.\n\"\"\"\n\nclass Stack(object):\n    def __init__(self):\n        self.l = []\n\n    def pop(self):\n        return self.l.pop()\n\n    def push(self, v):\n        self.l.append(v)\n\n    def get(self):\n        if len(self.l):\n            return self.l[-1]\n        return None\n\n    def __len__(self):\n        return len(self.l)\n\n\ndef check_brackets(str):\n    stack = Stack()\n    stack_index = Stack()\n    for i, c in enumerate(str):\n        # Put opening brackets into stack\n        if c in '({[':\n            stack.push(c)\n            stack_index.push(i+1)\n        # Check closing brackets\n        brackets = ['[]', '()', '{}']\n        for br_open, br_close in brackets:\n            if c == br_close:\n                if stack.get() != br_open:\n                    return i + 
1\n                else:\n                    stack.pop()\n                    stack_index.pop()\n    if len(stack) == 0:\n        return 'Success'\n    return stack_index.get()\n\n\ndef test(foo):\n    test_cases = [\n        ['[]', 'Success'],\n        ['{}[]', 'Success'],\n        ['[()]', 'Success'],\n        ['(())', 'Success'],\n        ['{[]}()', 'Success'],\n        ['{', 1],\n        ['{[}', 3],\n        ['foo(bar);', 'Success'],\n        ['foo(bar[i);', 10],\n        ['([](){([])})', 'Success'],\n        ['()[]}', 5],\n        ['{{[()]]', 7],\n        ['{}([]', 3]\n    ]\n    for i, (input_data, expected) in enumerate(test_cases):\n        res = foo(input_data)\n        assert res == expected, f\"Wrong case #{i+1}. Input: {input_data}. Expected {expected}, got {foo(input_data)}\"\n\nif __name__ == \"__main__\":\n    test(check_brackets)\n","sub_path":"open_brackets.py","file_name":"open_brackets.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51497164","text":"import melee\nimport globals\nimport Chains\nfrom Chains.wavedash import Wavedash\nfrom Chains.dashdance import DashDance\nfrom Chains.grabedge import Grabedge\nfrom melee.enums import Action, Character\n\nclass Retreat():\n    chain = None\n\n    def pickchain(self, chain, args=[]):\n        if type(self.chain) != chain:\n            self.chain = chain(*args)\n        self.chain.step()\n\n    def shouldretreat():\n        opponent_state = globals.opponent_state\n        smashbot_state = globals.smashbot_state\n\n        if smashbot_state.invulnerability_left > 1:\n            return False\n\n        shieldactions = [Action.SHIELD_START, Action.SHIELD, Action.SHIELD_RELEASE, \\\n            Action.SHIELD_STUN, Action.SHIELD_REFLECT]\n\n        # FireFox is different\n        firefox = opponent_state.action in [Action.SWORD_DANCE_4_HIGH, Action.SWORD_DANCE_4_MID, Action.SWORD_DANCE_3_MID, Action.SWORD_DANCE_3_LOW] \\\n            and opponent_state.character in [Character.FOX, Character.FALCO]\n        if firefox:\n            return True\n\n        # If opponent is landing from an attack, and we're shielding, retreat!\n        if opponent_state.action in [Action.DAIR_LANDING, Action.NAIR_LANDING, Action.FAIR_LANDING, \\\n            Action.UAIR_LANDING, Action.BAIR_LANDING, Action.LANDING] and smashbot_state.action in shieldactions:\n            return True\n\n        # If opponent is falling, and we're in shield, retreat\n        if opponent_state.speed_y_self < 0 and not opponent_state.on_ground and smashbot_state.action in shieldactions:\n            return True\n\n        if opponent_state.action == Action.LOOPING_ATTACK_MIDDLE:\n            return True\n\n        return False\n\n    def step(self):\n        #If we can't interrupt the chain, just continue it\n        if self.chain != None and not self.chain.interruptible:\n            self.chain.step()\n            return\n\n        needswavedash = globals.smashbot_state.action in [Action.DOWN_B_GROUND, Action.DOWN_B_STUN, \\\n            Action.DOWN_B_GROUND_START, Action.LANDING_SPECIAL, Action.SHIELD, Action.SHIELD_START, \\\n            Action.SHIELD_RELEASE, Action.SHIELD_STUN, Action.SHIELD_REFLECT]\n        if needswavedash:\n            self.pickchain(Wavedash, [1, False])\n            return\n\n        bufferzone = 30\n        onright = globals.opponent_state.x < globals.smashbot_state.x\n        if not onright:\n            bufferzone *= -1\n\n        pivotpoint = globals.opponent_state.x + bufferzone\n        # Don't run off the stage though, adjust this back inwards a little if it's off\n\n        edgebuffer = 30\n        edge = melee.stages.edgegroundposition(globals.gamestate.stage) - edgebuffer\n        # If we are about to pivot near the edge, just grab the edge instead\n        if abs(pivotpoint) > edge:\n            self.pickchain(Grabedge)\n            return\n\n        pivotpoint = min(pivotpoint, edge)\n        pivotpoint = max(pivotpoint, -edge)\n\n        self.chain = None\n        self.pickchain(DashDance, 
[pivotpoint])\n","sub_path":"Tactics/retreat.py","file_name":"retreat.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"474661494","text":"from apimanager import prices\nfrom apimanager import ApiManager\nimport operator\n\nclass PriceBot():\n\n def __init__(self, api_manager,item_list):\n self._api_manager = api_manager\n self._item_list = item_list\n\n\n @property\n def item_list(self):\n return self._item_list\n\n @item_list.setter\n def item_list(self, items_list):\n self._item_list = items_list\n\n @property\n def delay(self):\n return self._delay\n \n @delay.setter\n def delay(self, time_delay):\n self._delay = time_delay\n\n \n def price_check(self, amount):\n\n profit_list = list()\n #print('I made it here!')\n #print(self.item_list)\n\n for item in self.item_list:\n #print(item)\n try:\n item_choice = self._api_manager.valid_item(item)\n # print(item_choice)\n profit = prices(item_choice)[2]\n #print(profit)\n #profit = profit\n #print(profit)\n profit_list.append((item, profit))\n #print(profit_list)\n except:\n pass\n if not amount:\n profit_list = remove_nons(profit_list)\n #print(profit_list)\n if profit_list is not None:\n profit_list = psort(profit_list)\n #print(profit_list)\n return profit_list\n\ndef remove_nons(item_list):\n \"\"\"\n removes all non profitable items from the list\n \"\"\"\n new_list = item_list\n for item in reversed(item_list):\n #print(item)\n if int(item[1]) <= 0:\n new_list.remove(item)\n #print(new_list)\n if len(new_list) > 0:\n return new_list\n else:\n return None\n\ndef psort(item_list):\n \"\"\"\n sorts the list from greatest to least\n \"\"\"\n new_list = sorted(item_list, key=operator.itemgetter(1))\n return new_list[::-1]","sub_path":"Obsolete/price_bot.py","file_name":"price_bot.py","file_ext":"py","file_size_in_byte":1885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"121466678","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\n\r\nclass MahjongCard:\r\n \r\n def __init__(self, kind, num):\r\n \r\n self.id = -1\r\n self.kind = kind\r\n self.num = num\r\n self.kn2id()\r\n \r\n \r\n def kn2id(self):\r\n \r\n if self.kind in [0, 1, 2]: \r\n self.id = self.kind*9 + self.num\r\n elif self.kind == 3:\r\n self.id = 27 + self.num\r\n elif self.kind == 4:\r\n self.id = 31 + self.num\r\n \r\n \r\n @staticmethod\r\n def id2kn(_id):\r\n \r\n if _id < 27:\r\n kind = int(_id/9)\r\n num = _id % 9\r\n elif _id < 31:\r\n kind = 3\r\n num = _id - 27\r\n elif _id < 34:\r\n kind = 4\r\n num = _id - 31\r\n return kind, num\r\n \r\n \r\n\r\nclass MahjongPlayer:\r\n \r\n def __init__(self):\r\n \r\n self.cards = np.zeros(34)\r\n self.hide = np.zeros(34)\r\n self.expose = np.zeros(34)\r\n\r\n\r\n def add_card(self, card):\r\n \r\n self.cards[card.id] += 1\r\n\r\n\r\n def set_cards(self, cards):\r\n\r\n self.cards = cards.copy()\r\n\r\n\r\n\r\nclass MahjongJudge:\r\n\r\n def __init__(self, player=None):\r\n\r\n self.player = player\r\n\r\n\r\n @staticmethod\r\n def analysenumbercards(cards):\r\n\r\n sumofcards = np.sum(cards)\r\n if sumofcards == 2:\r\n if np.max(cards) == 2:\r\n return np.array([1, 0, 0])\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 3:\r\n if np.max(cards) == 3:\r\n return np.array([0, 1, 0])\r\n start = np.where(cards > 0)[0][0]\r\n if start == len(cards) - 2:\r\n return np.array([0, 0, 0])\r\n else:\r\n if cards[start] * cards[start+1] * cards[start+2] == 1:\r\n 
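Two quirks in the price-bot helpers above are worth noting: remove_nons() mutates the list it was handed (`new_list = item_list` aliases rather than copies), and psort() sorts ascending and then reverses the result. A sketch of one equivalent helper that sidesteps both:

```python
from operator import itemgetter

def profitable_sorted(pairs):
    """Drop (item, profit) pairs with no profit; return the rest, highest first."""
    kept = [p for p in pairs if int(p[1]) > 0]           # leaves the input untouched
    return sorted(kept, key=itemgetter(1), reverse=True) or None

# profitable_sorted([('a', 3), ('b', -1), ('c', 7)]) -> [('c', 7), ('a', 3)]
```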
return np.array([0, 0, 1])\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 5:\r\n if np.max(cards) == 1:\r\n return np.array([0, 0, 0])\r\n else:\r\n temp = np.where(cards > 1)[0][0]\r\n next_cards = cards.copy()\r\n next_cards[temp] -= 2\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) == 1:\r\n return (1, 0, 0) + MahjongJudge.analysenumbercards(next_cards)\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 6:\r\n start = np.where(cards > 0)[0][0]\r\n if cards[start] == 2:\r\n if start < 7:\r\n if cards[start] * cards[start+1] * cards[start+2] == 8:\r\n return np.array([0, 0, 2])\r\n return np.array([0, 0, 0])\r\n elif cards[start] == 1:\r\n if cards[start] * cards[start+1] * cards[start+2] == 0:\r\n return np.array([0, 0, 0])\r\n else:\r\n next_cards = cards.copy()\r\n next_cards[start:start+3] -= 1\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) == 1:\r\n return (0, 0, 1) + MahjongJudge.analysenumbercards(next_cards)\r\n else:\r\n return np.array([0, 0, 0])\r\n else:\r\n next_cards = cards.copy()\r\n next_cards[start] -= 3\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) == 1:\r\n return (0, 1, 0) + MahjongJudge.analysenumbercards(next_cards)\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 8:\r\n if np.max(cards) == 1:\r\n return np.array([0, 0, 0])\r\n temps = np.where(cards > 1)\r\n best_analyse = np.array([0, 0, 0])\r\n for t in temps[0]:\r\n next_cards = cards.copy()\r\n next_cards[t] -= 2\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) > sum(best_analyse):\r\n best_analyse = MahjongJudge.analysenumbercards(next_cards)\r\n if sum(best_analyse) == 2:\r\n return (1, 0, 0) + best_analyse\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 9:\r\n start = np.where(cards > 0)[0][0]\r\n if cards[start] > 2:\r\n next_cards = cards.copy()\r\n next_cards[start] -= 3\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) == 2:\r\n return (0, 1, 0) + MahjongJudge.analysenumbercards(next_cards)\r\n if cards[start] * cards[start+1] * cards[start+2] == 0:\r\n return np.array([0, 0, 0])\r\n else:\r\n next_cards = cards.copy()\r\n next_cards[start:start+3] -= 1\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) == 2:\r\n return (0, 0, 1) + MahjongJudge.analysenumbercards(next_cards)\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 11:\r\n temps = np.where(cards > 1)\r\n best_analyse = np.array([0, 0, 0])\r\n for t in temps[0]:\r\n next_cards = cards.copy()\r\n next_cards[t] -= 2\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) > sum(best_analyse):\r\n best_analyse = MahjongJudge.analysenumbercards(next_cards)\r\n if sum(best_analyse) == 3:\r\n return (1, 0, 0) + best_analyse\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 12:\r\n start = np.where(cards > 0)[0][0]\r\n if cards[start] > 2:\r\n next_cards = cards.copy()\r\n next_cards[start] -= 3\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) == 3:\r\n return (0, 1, 0) + MahjongJudge.analysenumbercards(next_cards)\r\n if cards[start] * cards[start+1] * cards[start+2] == 0:\r\n return np.array([0, 0, 0])\r\n else:\r\n next_cards = cards.copy()\r\n next_cards[start:start+3] -= 1\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) == 3:\r\n return (0, 0, 1) + MahjongJudge.analysenumbercards(next_cards)\r\n else:\r\n return np.array([0, 0, 0])\r\n elif sumofcards == 14:\r\n temps = np.where(cards > 1)\r\n best_analyse = np.array([0, 0, 0])\r\n for t in temps[0]:\r\n next_cards = cards.copy()\r\n next_cards[t] -= 
2\r\n if sum(MahjongJudge.analysenumbercards(next_cards)) > sum(best_analyse):\r\n best_analyse = MahjongJudge.analysenumbercards(next_cards)\r\n if sum(best_analyse) == 4:\r\n return (1, 0, 0) + best_analyse\r\n else:\r\n return np.array([0, 0, 0])\r\n else:\r\n return np.array([0, 0, 0])\r\n \r\n\r\n @staticmethod \r\n def analysewordcards(cards):\r\n \r\n result = np.array([0, 0, 0])\r\n for card in cards:\r\n if card == 2:\r\n result[0] += 1\r\n if card == 3:\r\n result[1] += 1\r\n return result\r\n \r\n\r\n def trival_jugde(self, cards=None, mode=0):\r\n\r\n if mode == 1:\r\n cards = self.player.cards\r\n result = MahjongJudge.analysenumbercards(cards[0:9]) + \\\r\n MahjongJudge.analysenumbercards(cards[9:18]) + \\\r\n MahjongJudge.analysenumbercards(cards[18:27]) + \\\r\n MahjongJudge.analysenumbercards(cards[27:34]) \r\n if (result[0] == 1) and (result[1] + result[2] == 4):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n def detect_lack(self):\r\n\r\n lack_cards = []\r\n if sum(self.player.cards) == 13:\r\n for c in range(34):\r\n temp = np.zeros(34)\r\n temp[c] += 1\r\n if self.trival_jugde(self.player.cards + temp):\r\n lack_cards.append(c)\r\n else:\r\n print('wrong cards number')\r\n return lack_cards\r\n \r\n \r\n\r\ndef randomcards():\r\n cards = np.zeros(9)\r\n while sum(cards) < 14:\r\n temp = np.random.randint(0, 9)\r\n if cards[temp] < 4:\r\n cards[temp] += 1\r\n print(cards)\r\n return cards\r\n\r\n\r\ndef randomcompletecards():\r\n s_distribution = [0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4]\r\n cards = np.zeros(34)\r\n n_shun = np.random.choice(s_distribution)\r\n n_ke = 4 - n_shun\r\n c_shun = [np.random.randint(0, 3)*9+np.random.randint(1, 8) for _ in range(n_shun)]\r\n for c in c_shun:\r\n cards[c-1:c+2] += 1\r\n while n_ke > 0:\r\n temp_ind = np.random.randint(0, 34)\r\n if cards[temp_ind] < 2:\r\n cards[temp_ind] += 3\r\n n_ke -= 1\r\n while True:\r\n temp_ind = np.random.randint(0, 34)\r\n if cards[temp_ind] < 3:\r\n cards[temp_ind] += 2\r\n break\r\n return cards\r\n\r\n\r\ndef onestep2completecards():\r\n cards = randomcompletecards()\r\n existcards = np.where(cards>1)[0]\r\n temp_ind = np.random.choice(existcards)\r\n cards[temp_ind] -= 1\r\n return cards","sub_path":"manualjudge.py","file_name":"manualjudge.py","file_ext":"py","file_size_in_byte":7931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"136698315","text":"from ev3sim.visual.menus.utils import CustomScroll\nimport yaml\nimport os\nimport pygame\nimport pygame_gui\nimport sentry_sdk\nfrom ev3sim.file_helper import find_abs, find_abs_directory\nfrom ev3sim.validation.bot_files import BotValidator\nfrom ev3sim.visual.menus.base_menu import BaseMenu\nfrom ev3sim.search_locations import asset_locations, bot_locations, preset_locations\n\n\nclass BotMenu(BaseMenu):\n\n bot_keys = []\n\n def iconPos(self, buttonPos, buttonSize, iconSize):\n return (\n buttonPos[0] + buttonSize[0] / 2 - iconSize[0] / 2,\n buttonPos[1] + buttonSize[1] * 0.2,\n )\n\n def generateObjects(self):\n # First, find all bot files.\n if not self.in_error:\n self.available_bots = []\n error_bots = []\n for rel_dir in bot_locations():\n try:\n actual_dir = find_abs_directory(rel_dir)\n except:\n continue\n for bot in BotValidator.all_valid_in_dir(actual_dir):\n try:\n # Show everything except dir and .bot\n with open(os.path.join(actual_dir, bot, \"config.bot\"), \"r\") as f:\n config = yaml.safe_load(f)\n # If we are hidden, or in edit mode with hidden_edit, then 
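A quick sanity check of the recursive mahjong analyser above, using the 9-bin one-suit histogram it expects: a 1-2-3 run plus a pair of 6s sums to five tiles, so the `sumofcards == 5` branch should peel off the pair and recurse to find the run, giving one pair, no triplet, one run. A runnable setup, with the expected output noted in a comment:

```python
import numpy as np

cards = np.zeros(9)
cards[0:3] += 1   # tiles 1, 2, 3: one run
cards[5] += 2     # a pair of 6s
# Expected: MahjongJudge.analysenumbercards(cards) -> array([1, 0, 1])
#           i.e. (pairs, triplets, runs) == (1, 0, 1)
```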
don't show.\n                            if not config.get(\"hidden\", False) and not (\n                                config.get(\"hidden_edit\", False) and len(self.bot_keys) == 0\n                            ):\n                                self.available_bots.append((bot, os.path.join(actual_dir, bot), rel_dir, bot))\n                    except Exception as e:\n                        sentry_sdk.capture_exception(e)\n                        error_bots.append(os.path.join(actual_dir, bot))\n            if self.first_launch and error_bots:\n                self.first_launch = False\n                self.in_error = True\n                self.addErrorDialog(\n                    'A problem occurred loading the following bots:
<br><br>
'\n                    + \"<br>
\".join(bot for bot in error_bots)\n                    + \"<br>
\"\n )\n return\n\n self.bg = pygame_gui.elements.UIPanel(\n relative_rect=pygame.Rect(0, 0, *self._size),\n starting_layer_height=-1,\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"background\"),\n )\n self._all_objs.append(self.bg)\n\n # Scrolling container\n old_y = getattr(getattr(self, \"scrolling_container\", None), \"cur_y\", 0)\n self.scrolling_container = CustomScroll(\n relative_rect=pygame.Rect(0, 0, *self._size),\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"scroll_container\"),\n )\n self.scrolling_container.num_elems = len(self.available_bots)\n scrolling_size = (self._size[0] / 4 + self._size[0] / 5, self._size[1] * 0.9 - min(self._size[1] / 6, 90))\n # Setting dimensions and positions on a UIScrollingContainer seems buggy. This works.\n self.scrolling_container.set_dimensions(scrolling_size)\n self.scrolling_container.set_position(scrolling_size)\n self.scrolling_container.cur_y = old_y\n self.scrolling_container.set_scroll(old_y)\n self._all_objs.append(self.scrolling_container)\n\n button_size = self._size[0] / 4, 60\n info_size = self._size[0] / 4 - 20, 15\n bot_rect = lambda i: (self._size[0] / 10, self._size[1] / 10 + i * button_size[1] * 1.5)\n info_rect = lambda b_r: (\n b_r[0] + button_size[0] - info_size[0] - 10,\n b_r[1] + button_size[1] - info_size[1] - 5,\n )\n self.bot_buttons = []\n self.bot_descriptions = []\n for i, (show, bot, rel_dir, filename) in enumerate(self.available_bots):\n self.bot_buttons.append(\n pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(*bot_rect(i), *button_size),\n text=show,\n manager=self,\n container=self.scrolling_container,\n object_id=pygame_gui.core.ObjectID(\n show + \"-\" + str(i), \"list_button_highlighted\" if i == self.bot_index else \"list_button\"\n ),\n )\n )\n self.addButtonEvent(show + \"-\" + str(i), self.setBotIndex, i)\n self.bot_descriptions.append(\n pygame_gui.elements.UILabel(\n relative_rect=pygame.Rect(*info_rect(bot_rect(i)), *info_size),\n text=rel_dir,\n manager=self,\n container=self.scrolling_container,\n object_id=pygame_gui.core.ObjectID(\n show + \"-dir-\" + str(i), \"button_info_selected\" if i == self.bot_index else \"button_info\"\n ),\n )\n )\n self._all_objs.extend(self.bot_buttons)\n self._all_objs.extend(self.bot_descriptions)\n\n preview_size = self._size[0] / 4, self._size[1] / 4\n preview_size = (\n min(preview_size[0], (preview_size[1] * 4) // 3),\n min(preview_size[1], (preview_size[0] * 3) // 4),\n )\n try:\n if self.bot_index >= len(self.available_bots):\n self.bot_index = -1\n if self.bot_index == -1:\n image = pygame.Surface(preview_size)\n image.fill(pygame.Color(self.bg.background_colour))\n else:\n with open(os.path.join(self.available_bots[self.bot_index][1], \"config.bot\"), \"r\") as f:\n config = yaml.safe_load(f)\n bot_preview = os.path.join(\n self.available_bots[self.bot_index][1], config.get(\"preview_path\", \"preview.png\")\n )\n image = pygame.image.load(bot_preview)\n except Exception as e:\n sentry_sdk.capture_exception(e)\n self.setBotIndex(-1)\n self.addErrorDialog(\n 'The bot you have selected has some internal errors EV3Sim cannot resolve.
<br><br>
'\n + \"If you'd like to fix this, then try manually editing the bot file in a text editor.\"\n )\n return\n if image.get_size() != preview_size:\n image = pygame.transform.smoothscale(image, [int(v) for v in preview_size])\n self.preview_image = pygame_gui.elements.UIImage(\n relative_rect=pygame.Rect(self._size[0] * 0.9 - preview_size[0], self._size[1] * 0.1, *preview_size),\n image_surface=image,\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"preview-image\"),\n )\n self._all_objs.append(self.preview_image)\n\n if len(self.bot_keys) == 0:\n code_size = preview_size[0] * 0.4, preview_size[1] * 0.4\n code_button_pos = (\n self._size[0] * 0.9 - code_size[0] - 10,\n self._size[1] * 0.1 + preview_size[1] + 10,\n )\n code_icon_size = code_size[1] * 0.6, code_size[1] * 0.6\n self.code_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(*code_button_pos, *code_size),\n text=\"\",\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"bot-code\", \"settings_buttons\"),\n )\n self.addButtonEvent(\"bot-code\", self.clickCode)\n if not self.code_enable:\n self.code_button.disable()\n code_icon_path = find_abs(\"ui/code.png\", allowed_areas=asset_locations())\n self.code_icon = pygame_gui.elements.UIImage(\n relative_rect=pygame.Rect(*self.iconPos(code_button_pos, code_size, code_icon_size), *code_icon_size),\n image_surface=pygame.image.load(code_icon_path),\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"code-icon\"),\n )\n self._all_objs.append(self.code_button)\n self._all_objs.append(self.code_icon)\n\n edit_button_pos = (\n self._size[0] * 0.9 - preview_size[0],\n self._size[1] * 0.1 + preview_size[1] + 10,\n )\n self.edit_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(*edit_button_pos, *code_size),\n text=\"\",\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"bot-edit\", \"settings_buttons\"),\n )\n self.addButtonEvent(\"bot-edit\", self.clickEdit)\n if not self.edit_enable:\n self.edit_button.disable()\n edit_icon_path = find_abs(\"ui/edit.png\", allowed_areas=asset_locations())\n self.edit_icon = pygame_gui.elements.UIImage(\n relative_rect=pygame.Rect(*self.iconPos(edit_button_pos, code_size, code_icon_size), *code_icon_size),\n image_surface=pygame.image.load(edit_icon_path),\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"edit-icon\"),\n )\n self._all_objs.append(self.edit_button)\n self._all_objs.append(self.edit_icon)\n\n new_size = self._size[0] / 8, min(self._size[1] / 6, 90)\n new_icon_size = new_size[1] * 0.6, new_size[1] * 0.6\n new_bot_pos = (bot_rect(0)[0] + button_size[0] - new_size[0], self._size[1] * 0.9 - new_size[1])\n self.new_bot = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(*new_bot_pos, *new_size),\n text=\"\",\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"new_bot\", \"action_button\"),\n )\n self.addButtonEvent(\"new_bot\", self.clickNew)\n new_bot_path = find_abs(\"ui/add.png\", allowed_areas=asset_locations())\n self.new_icon = pygame_gui.elements.UIImage(\n relative_rect=pygame.Rect(*self.iconPos(new_bot_pos, new_size, new_icon_size), *new_icon_size),\n image_surface=pygame.image.load(new_bot_path),\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"new_bot-icon\"),\n )\n self._all_objs.append(self.new_bot)\n self._all_objs.append(self.new_icon)\n\n remove_bot_pos = (bot_rect(0)[0], self._size[1] * 0.9 - new_size[1])\n self.remove_bot = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(*remove_bot_pos, *new_size),\n text=\"\",\n manager=self,\n 
object_id=pygame_gui.core.ObjectID(\"remove_bot\", \"cancel-changes\"),\n )\n self.addButtonEvent(\"remove_bot\", self.clickRemove)\n if not self.remove_enable:\n self.remove_bot.disable()\n remove_bot_path = find_abs(\"ui/bin.png\", allowed_areas=asset_locations())\n self.remove_icon = pygame_gui.elements.UIImage(\n relative_rect=pygame.Rect(*self.iconPos(remove_bot_pos, new_size, new_icon_size), *new_icon_size),\n image_surface=pygame.image.load(remove_bot_path),\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"remove_bot-icon\"),\n )\n self._all_objs.append(self.remove_bot)\n self._all_objs.append(self.remove_icon)\n super().generateObjects()\n else:\n # Bot key locations, for selecting bots in batch files.\n self.bot_loc_spots = []\n for i in range(len(self.bot_keys)):\n if i == self.key_index:\n self.bot_loc_spots.append(self.createBotImage(i, bg=pygame.Color(\"#80b918\")))\n else:\n self.bot_loc_spots.append(self.createBotImage(i))\n self.sizeBotImage(i, big_mode=len(self.bot_keys) == 1)\n img = self.preview_images[i]\n if img is None:\n continue\n if img.get_size() != self.bot_loc_spots[i].rect.size:\n img = pygame.transform.smoothscale(\n img, (self.bot_loc_spots[i].rect.width, self.bot_loc_spots[i].rect.height)\n )\n self.bot_loc_spots[i].set_image(img)\n self._all_objs.extend(self.bot_loc_spots)\n\n select_size = (self._size[0] / 4 - 20) / 2, min(self._size[1] / 4, 120)\n select_button_pos = (self._size[0] * 0.9 - select_size[0] * 2 - 15, self._size[1] * 0.9 - select_size[1])\n self.select_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(*select_button_pos, *select_size),\n text=\"SELECT\",\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"select-bot\", \"action_button\"),\n )\n self.addButtonEvent(\"select-bot\", self.clickSelect)\n if not self.select_enable:\n self.select_button.disable()\n self._all_objs.append(self.select_button)\n\n done_size = (self._size[0] / 4 - 20) / 2, min(self._size[1] / 4, 120)\n done_button_pos = (self._size[0] * 0.9 - select_size[0] - 5, self._size[1] * 0.9 - select_size[1])\n self.done_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect(*done_button_pos, *done_size),\n text=\"DONE\",\n manager=self,\n object_id=pygame_gui.core.ObjectID(\"select-done\", \"action_button\"),\n )\n self.addButtonEvent(\"select-done\", self.clickDone)\n if self.key_index == 0:\n self.done_button.disable()\n self._all_objs.append(self.done_button)\n super().generateObjects()\n\n def createBotImage(self, index, bg=None):\n from ev3sim.visual.manager import ScreenObjectManager\n from ev3sim.visual.utils import worldspace_to_screenspace\n from ev3sim.visual.objects import Text\n\n width = 0\n lengths = []\n surfaces = []\n for text_line in self.bot_keys[index].replace(\"\\\\n\", \"\\n\").split(\"\\n\"):\n text_object = Text()\n text_object.initFromKwargs(\n text=text_line,\n hAlignment=\"m\",\n vAlignment=\"m\",\n )\n named_surface = ScreenObjectManager.instance.screen.copy()\n named_surface.fill(pygame.Color(\"#181A25\") if bg is None else bg)\n text_object.applyToScreen(named_surface)\n surfaces.append(named_surface)\n lengths.append(text_object.rect.height)\n width = max(width, text_object.rect.width)\n line_spacing = 10\n s = max(width, sum(lengths) + (len(lengths) - 1) * line_spacing) + 30\n pos = worldspace_to_screenspace((0, 0))\n cropped_surface = pygame.Surface((s, s))\n cropped_surface.fill(pygame.Color(\"#181A25\") if bg is None else bg)\n cur_y = (s - (sum(lengths) + (len(lengths) - 1) * line_spacing)) // 2\n for y, 
surface in zip(lengths, surfaces):\n cropped_surface.blit(surface, (0, cur_y), (pos[0] - s // 2, pos[1] - (y + 1) // 2 - 1, s, y + 2))\n cur_y += y + line_spacing\n return pygame_gui.elements.UIImage(\n relative_rect=pygame.Rect(0, 0, *self._size),\n image_surface=cropped_surface,\n manager=self,\n object_id=pygame_gui.core.ObjectID(f\"bot-image-{self.bot_keys[index]}\"),\n )\n\n def sizeBotImage(self, index, big_mode=False):\n preview_size = self._size[0] / 4, self._size[1] / 4\n preview_size = (\n min(preview_size[0], (preview_size[1] * 4) // 3),\n min(preview_size[1], (preview_size[0] * 3) // 4),\n )\n if big_mode:\n # beeg\n self.bot_loc_spots[index].set_dimensions((preview_size[0], preview_size[0]))\n self.bot_loc_spots[index].set_position(\n (\n self._size[0] * 0.9 - preview_size[0],\n self._size[1] * 0.1 + preview_size[1] + 20 + (preview_size[0] * 1.1) * index,\n )\n )\n else:\n self.bot_loc_spots[index].set_dimensions((preview_size[0] * 0.45, preview_size[0] * 0.45))\n self.bot_loc_spots[index].set_position(\n (\n self._size[0] * 0.9 - preview_size[0] * (1 if index % 2 == 0 else 0.45),\n self._size[1] * 0.1 + preview_size[1] + 20 + (index // 2) * preview_size[0] * 0.55,\n )\n )\n\n def initWithKwargs(self, **kwargs):\n self.in_error = False\n self.first_launch = True\n batch = kwargs.get(\"batch_file\", None)\n self.batch = batch\n if batch is None:\n # We are simply viewing the bots to edit or manage.\n self.bot_keys = []\n else:\n self.key_index = 0\n with open(batch, \"r\") as f:\n b_config = yaml.safe_load(f)\n preset = b_config[\"preset_file\"]\n fname = find_abs(preset, allowed_areas=preset_locations())\n with open(fname, \"r\") as f:\n p_config = yaml.safe_load(f)\n self.bot_keys = p_config[\"bot_names\"]\n self.bot_values = [None] * len(self.bot_keys)\n self.preview_images = [None] * len(self.bot_keys)\n self.bot_select_index = 0\n self.select_enable = False\n self.code_enable = False\n self.edit_enable = False\n self.remove_enable = False\n self.bot_index = -1\n self.next = kwargs.get(\"next\", None)\n self.next_kwargs = kwargs.get(\"next_kwargs\", {})\n super().initWithKwargs(**kwargs)\n\n def clickEdit(self):\n # Shouldn't happen but lets be safe.\n if self.bot_index == -1:\n return\n from ev3sim.visual.manager import ScreenObjectManager\n\n ScreenObjectManager.instance.pushScreen(\n ScreenObjectManager.SCREEN_BOT_EDIT,\n bot_file=self.available_bots[self.bot_index][1],\n bot_dir_file=self.available_bots[self.bot_index][2:4],\n )\n\n ScreenObjectManager.instance.screens[ScreenObjectManager.SCREEN_BOT_EDIT].clearEvents()\n\n def clickCode(self):\n from ev3sim.utils import open_file, APP_VSCODE, APP_MINDSTORMS\n\n # Shouldn't happen but lets be safe.\n if self.bot_index == -1:\n return\n with open(os.path.join(self.available_bots[self.bot_index][1], \"config.bot\")) as f:\n conf = yaml.safe_load(f)\n\n if conf.get(\"type\", \"python\") == \"mindstorms\":\n script_location = conf.get(\"script\", \"program.ev3\")\n\n open_file(os.path.join(self.available_bots[self.bot_index][1], script_location), APP_MINDSTORMS)\n else:\n script_location = conf.get(\"script\", \"code.py\")\n\n open_file(\n os.path.join(self.available_bots[self.bot_index][1], script_location),\n APP_VSCODE,\n folder=os.path.join(find_abs_directory(\"workspace\")),\n )\n\n def clickSelect(self):\n # Shouldn't happen but lets be safe.\n if self.bot_index == -1:\n return\n self.setBotAtIndex(self.key_index)\n self.incrementBotSelectIndex()\n self.regenerateObjects()\n\n def clickDone(self):\n with 
open(self.batch, \"r\") as f:\n json_obj = yaml.safe_load(f)\n json_obj[\"bots\"] = [x for x in self.bot_values if x is not None]\n string = yaml.dump(json_obj)\n with open(self.batch, \"w\") as f:\n f.write(string)\n from ev3sim.visual.manager import ScreenObjectManager\n\n ScreenObjectManager.instance.popScreen()\n if self.next is not None:\n ScreenObjectManager.instance.pushScreen(self.next, **self.next_kwargs)\n\n def clickNew(self):\n from ev3sim.visual.manager import ScreenObjectManager\n\n ScreenObjectManager.instance.pushScreen(ScreenObjectManager.SCREEN_BOT_EDIT)\n\n def onSave(filename):\n self.regenerateObjects()\n for i, (_, _, _, bot) in enumerate(self.available_bots):\n if bot == filename:\n self.setBotIndex(i)\n\n ScreenObjectManager.instance.screens[ScreenObjectManager.SCREEN_BOT_EDIT].clearEvents()\n ScreenObjectManager.instance.screens[ScreenObjectManager.SCREEN_BOT_EDIT].onSave = onSave\n\n def clickRemove(self):\n # Shouldn't happen but lets be safe.\n if self.bot_index == -1:\n return\n import shutil\n\n shutil.rmtree(self.available_bots[self.bot_index][1])\n self.setBotIndex(-1)\n\n def handleEvent(self, event):\n super().handleEvent(event)\n if event.type == pygame.KEYDOWN:\n if event.key in [pygame.K_DOWN, pygame.K_w]:\n self.incrementBotIndex(1)\n elif event.key in [pygame.K_UP, pygame.K_s]:\n self.incrementBotIndex(-1)\n elif event.key == pygame.K_RETURN:\n self.clickCode()\n\n def setBotIndex(self, new_index):\n self.bot_index = new_index\n if len(self.bot_keys) == 0:\n self.code_enable = new_index != -1\n self.edit_enable = new_index != -1\n self.remove_enable = new_index != -1 and not self.available_bots[new_index][2].startswith(\"package\")\n else:\n self.select_enable = new_index != -1\n self.regenerateObjects()\n\n def incrementBotIndex(self, amount):\n if self.bot_index == -1:\n new_index = len(self.available_bots) + amount if amount < 0 else amount - 1\n else:\n new_index = self.bot_index + amount\n new_index %= len(self.bot_buttons)\n self.setBotIndex(new_index)\n\n def setBotAtIndex(self, index):\n self.bot_values[index] = self.available_bots[self.bot_index][0]\n with open(os.path.join(self.available_bots[self.bot_index][1], \"config.bot\"), \"r\") as f:\n config = yaml.safe_load(f)\n bot_preview = os.path.join(self.available_bots[self.bot_index][1], config.get(\"preview_path\", \"preview.png\"))\n self.preview_images[index] = pygame.image.load(bot_preview)\n self.regenerateObjects()\n\n def incrementBotSelectIndex(self):\n # Select the next key\n self.key_index += 1\n if self.key_index == len(self.bot_keys):\n # We've selected all the bots. 
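clickDone() above is a plain YAML load-mutate-dump round trip on the batch file. The same pattern in isolation, with a placeholder path, since it is easy to get wrong by reopening the file in the wrong mode:

```python
import yaml

def set_bots(batch_path, bots):
    # Read first, then reopen for writing; "w" truncates the file.
    with open(batch_path, "r") as f:
        config = yaml.safe_load(f)
    config["bots"] = [b for b in bots if b is not None]
    with open(batch_path, "w") as f:
        f.write(yaml.dump(config))
```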
Save.\n self.clickDone()\n else:\n # Update the screen.\n self.regenerateObjects()\n\n def onPop(self):\n self.setBotIndex(-1)\n","sub_path":"ev3sim/visual/menus/bot_menu.py","file_name":"bot_menu.py","file_ext":"py","file_size_in_byte":22621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"403908313","text":"import yaml, sys, logging\n\nfrom sqlite3 import Error\nfrom os.path import dirname, join, abspath\nsys.path.append(abspath(join(dirname(__file__), '..')))\n\nfrom src.Service.PriceChecker.PriceChecker import PriceChecker\nfrom src.Service.PriceDatabaseBuilder.PriceDatabaseBuilder import PriceDatabaseBuilder\nfrom src.Dao.Parts import Parts\nfrom src.Service.DatabaseConnection.DatabaseConnection import DatabaseConnection\nfrom src.Service.WebScraper.WebScraper import WebScraper\n\nlogger = logging.getLogger('PartCost')\nlogger.setLevel(logging.DEBUG)\n\nfile_handler = logging.FileHandler('/var/log/app.log')\nfile_handler.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfile_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\n\ndocument = open('/opt/projects/config/powersupply.yaml', 'r')\ndatabase_file_name = \"/opt/projects/db/parts.db\"\npartsList = yaml.safe_load(document)\n\ndatabaseConnectionService = DatabaseConnection(database_file_name, logger)\npriceChecker = PriceChecker(partsList, logger, WebScraper())\n\ntry:\n databaseConnection = databaseConnectionService.create_connection()\n\n priceDatabaseBuilder = PriceDatabaseBuilder(database_file_name, logger, databaseConnection)\n partsDao = Parts(database_file_name, logger, databaseConnection)\n\n priceDatabaseBuilder.create_table()\n partPrices = priceChecker.check_prices()\n partsDao.insert_parts(partPrices)\nexcept Error as e:\n logger.error(\"Exception occurred in PowerSupplyHandler: {0}\".format(e))\n","sub_path":"bin/PowerSupplyHandler.py","file_name":"PowerSupplyHandler.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"259910531","text":"import random\nimport os\nimport inspect\nimport sys\nimport shutil \nimport glob\nimport shapely\nfrom shapely import wkt\nfrom osgeo import ogr\nfrom ogr import *\n\nconfig_file = './config.txt'\n\ngeo_types = ['POLYGON', 'POINT', 'LINESTRING']\ngeo_collection_types = ['MULTIPOLYGON', 'MULTIPOINT', 'MULTILINESTRING', 'GEOMETRYCOLLECTION']\ncurve_types = ['CIRCULARSTRING','MULTICURVE','COMPOUNDCURVE']\nsurface_types = ['CURVEPOLYGON','MULTISURFACE','SURFACE']\n\ndef is_geometry(geo):\n geo = geo.strip().upper()\n\n for x in geo_types:\n if geo.startswith(x) and len(geo) != len(x):\n return True\n else:\n continue\n \n return False\n\ndef is_geometrycollection(geo):\n geo = geo.strip().upper()\n\n for x in geo_collection_types:\n if geo.startswith(x):\n return True\n else:\n continue\n \n return False\n\ndef is_geometrytype(geo):\n geo = geo.strip().upper()\n\n arr = []\n arr.extend(geo_types)\n arr.extend(geo_collection_types)\n arr.extend(curve_types)\n arr.extend(surface_types)\n\n for x in arr:\n if x in geo:\n return True\n else:\n continue\n \n return False\n\ndef is_empty(geo):\n geo = geo.strip().upper()\n if geo.endswith('EMPTY'):\n return True\n else:\n return False\n\n\ndef is_curve(geo):\n geo = geo.strip().upper()\n\n for x in curve_types:\n if geo.startswith(x):\n return True\n else:\n continue\n\n return False\n\ndef is_surface(geo):\n geo = 
geo.strip().upper()\n\n for x in surface_types:\n if geo.startswith(x):\n return True\n else:\n continue\n\n return False\n\n\n\nUNIT = 0.0001\nEPOCH = 1e-8\nEPOCH_CURVE = 1e-2\nEPOCH_SURFACE = 1e-2\nEPOCH_CURVE_RELATIVE = 1e-2\nEPOCH_SURFACE_RELATIVE = 1e-2\n\n# def compare_geometry(x, y):\n# arct = pygeos.Geometry(x)\n# pgis = pygeos.Geometry(y)\n# dist = pygeos.measurement.hausdorff_distance(arct, pgis)\n# arct_length = pygeos.measurement.length(arct)\n# pgis_length = pygeos.measurement.length(pgis)\n# max_len = max(arct_length, pgis_length)\n# if dist > max_len * UNIT:\n# return False\n# else:\n# return True\n\ndef compare_geometry(x, y):\n arct = wkt.loads(x)\n pgis = wkt.loads(y)\n\n if x.upper().endswith('EMPTY') and y.upper().endswith('EMPTY'):\n return True\n \n result = arct.equals_exact(pgis, EPOCH)\n\n # if not result:\n # print(arct, pgis)\n \n return result\n\ndef compare_geometrycollection(x, y):\n arct = wkt.loads(x)\n pgis = wkt.loads(y)\n # arct = CreateGeometryFromWkt(x)\n # pgis = CreateGeometryFromWkt(y)\n result = arct.equals(pgis)\n\n # if not result:\n # print(arct, pgis)\n \n return result\n\ndef compare_floats(x, y):\n\n x = float(x)\n y = float(y)\n if abs((x - y)) <= EPOCH:\n return True\n else:\n # print(x, y)\n return False\n\ndef compare_float(x, y, z, precision_error):\n\n x = float(x)\n y = float(y)\n z = float(z)\n if abs((x - y)) <= precision_error and abs((x-z)) <= precision_error and abs((y-z)) <= precision_error:\n return True\n else:\n # print(x, y)\n return False\n\ndef compare2float_relative(x_base, y_check, relative_error):\n x = float(x_base)\n y = float(y_check)\n if ((abs(x_base - y_check)) / (abs(x_base))) <= relative_error:\n return True\n else:\n return False\n\n\ndef compare3float_relative(x_base, y_check, z_intersection, relative_error):\n return compare2float_relative(x_base, y_check, relative_error) and compare2float_relative(x_base, z_intersection,relative_error) and compare2float_relative(y_check, z_intersection, relative_error)\n\n\ndef compare_curve(x, y):\n arct = CreateGeometryFromWkt(x)\n pgis = CreateGeometryFromWkt(y)\n\n intersection_length = Geometry.Length(Geometry.Intersection(arct,pgis))\n arct_length = Geometry.Length(arct)\n pgis_length = Geometry.Length(pgis)\n #result = compare_float(intersection_length, arct_length, pgis_length,EPOCH_CURVE)\n result = compare3float_relative(pgis_length, arct_length, intersection_length,EPOCH_CURVE_RELATIVE)\n return result\n\ndef compare_surface(x, y):\n arct = CreateGeometryFromWkt(x)\n pgis = CreateGeometryFromWkt(y)\n\n intersection_area = Geometry.Area(Geometry.Intersection(arct,pgis))\n arct_area = Geometry.Area(arct)\n pgis_area = Geometry.Area(pgis)\n\n result = compare3float_relative(pgis_area, arct_area, intersection_area, EPOCH_SURFACE_RELATIVE)\n #result = compare_float(intersection_area, arct_area, pgis_area, EPOCH_SURFACE)\n return result\n\ndef convert_str(strr):\n if strr.lower() == 'true' or strr.lower() == 't':\n return True\n elif strr.lower() == 'false' or strr.lower() == 'f':\n return False\n \n try:\n x = float(strr)\n return x\n except:\n pass\n\n return strr\n\ndef compare_one(result, expect):\n x = result[1]\n y = expect[1]\n # print('result: %s' % str(x))\n # print('expect: %s' % str(y))\n\n x = convert_str(x)\n y = convert_str(y)\n\n try:\n if isinstance(x, bool):\n flag = (x == y)\n if not flag:\n print(result[0], x, expect[0], y)\n return flag\n\n if isinstance(x, str):\n x = x.strip().upper()\n y = y.strip().upper()\n \n # check order : empty -> geo_types -> 
geocollection_types -> curve -> surface\n if (is_empty(x) and is_empty(y)):\n return True\n\n elif is_geometry(x) and is_geometry(y):\n flag = compare_geometry(x, y)\n if not flag:\n print(result[0], x, expect[0], y)\n return flag\n\n elif is_geometrycollection(x) and is_geometrycollection(y):\n flag = compare_geometrycollection(x, y)\n if not flag:\n print(result[0], x, expect[0], y)\n return flag\n\n elif is_curve(x) and is_curve(y):\n flag = compare_curve(x, y)\n if not flag:\n print(result[0], x, expect[0], y)\n return flag\n\n elif is_surface(x) and is_surface(y):\n flag = compare_surface(x, y)\n if not flag:\n print(result[0], x, expect[0], y)\n return flag\n\n else:\n if is_geometrytype(x) and is_geometrytype(y):\n flag = (x == y)\n if not flag:\n print(result[0], x, expect[0], y)\n return flag\n\n print(result[0], x, expect[0], y)\n return False\n\n if isinstance(x, int) or isinstance(x, float):\n flag = compare_floats(x, y)\n if not flag:\n print(result[0], x, expect[0], y)\n return flag\n except Exception as e:\n flag = False\n return flag\n\n\ndef compare_results(arctern_results, postgis_results):\n\n with open(arctern_results, 'r') as f:\n # arctern = f.readlines()\n arct_arr = []\n for (num, value) in enumerate(f, 1):\n if value.strip() != '':\n arct_arr.append((num, value.strip()))\n\n # arc = [list(eval(x.strip()).values())[0] for x in arctern if len(x.strip()) > 0]\n # print(arc)\n\n with open(postgis_results, 'r') as f:\n # postgis = f.readlines()\n pgis_arr = []\n for (num, value) in enumerate(f, 1):\n if value.strip() != '':\n pgis_arr.append((num, value.strip()))\n # pgis = [x.strip() for x in postgis if len(x.strip()) > 0]\n # print(pgis)\n \n flag = True\n\n if len(arct_arr) != len(pgis_arr):\n print('test result size: %s and expected result size: %s, NOT equal, check the two result files' % (len(arct_arr), len(pgis_arr)))\n return False\n\n for x, y in zip(arct_arr, pgis_arr):\n res = compare_one(x, y)\n flag = flag and res\n\n return flag\n\ndef parse(config_file):\n with open(config_file, 'r') as f:\n lines = f.readlines()\n xs = [x.strip().split('=') for x in lines if not x.strip().startswith('#')]\n return xs\n\n# arc_result_dir = './arctern_results'\narc_result_dir = '/tmp/arctern_results'\npgis_result_dir = './expected/results'\n\ndef compare_all():\n configs = parse(config_file)\n if len(configs) == 0:\n print('No Arctern test results found, maybe something wrong in config file, please check: %s' % config_file)\n return 0\n\n for x in configs:\n \n # arctern_result = os.path.join(arc_result_dir, x[0] + '.json')\n arctern_result = os.path.join(arc_result_dir, x[0] + '.csv')\n postgis_result = os.path.join(pgis_result_dir, x[3] + '.out')\n print('Arctern test: %s, result compare started, test result: %s, expected result: %s' % (x[0], arctern_result, postgis_result))\n \n if not os.path.isfile(arctern_result):\n print('Arctern test: %s, result: FAILED, reason: %s' % (x[0], 'test result not found [%s]' % arctern_result))\n continue\n\n if not os.path.isfile(postgis_result):\n print('Arctern test: %s, result: FAILED, reason: %s' % (x[0], 'expected result not found [%s]' % postgis_result))\n continue\n\n res = compare_results(arctern_result, postgis_result)\n if res == True:\n print('Arctern test: %s, result: PASSED' % x[0])\n else:\n print('Arctern test: %s, result: FAILED' % x[0])\n\n\ndef update_quote(file_path):\n with open(file_path, 'r') as f:\n content = f.read()\n update = content.replace(r'\"', '')\n with open(file_path, 'w') as f:\n f.write(update)\n\ndef 
update_bool(file_path):\n with open(file_path, 'r') as f:\n content = f.read()\n update = content.replace('true', 'True').replace('false', 'False')\n with open(file_path, 'w') as f:\n f.write(update)\n\ndef update_result():\n arr = ['run_test_st_issimple', 'run_test_st_intersects', 'run_test_st_contains', 'run_test_st_crosses', 'run_test_st_isvalid_1', 'run_test_st_overlaps', 'run_test_st_touches', 'run_test_st_within', 'run_test_st_equals_1', 'run_test_st_equals_2']\n configs = parse(config_file)\n if len(configs) == 0:\n print('No Arctern test results found, maybe something wrong in config file, please check: %s' % config_file)\n return 0\n\n for x in configs:\n arctern_result = os.path.join(arc_result_dir, x[0] + '.csv')\n if not os.path.isfile(arctern_result):\n print('Arctern test: %s, result: FAILED, reason: %s' % (x[0], 'test result not found [%s]' % arctern_result))\n continue\n\n if x[0] not in arr:\n update_quote(arctern_result)\n else:\n update_bool(arctern_result)\n \n\nif __name__ == '__main__':\n #compare.py unittest cases (expected no AssertionError) \n #test compare EMPTY\n geo1 = 'POINT EMPTY'\n geo2 = 'POINT EMPTY'\n geo3 = 'CIRCULARSTRING EMPTY'\n geo4 = 'POLYGON((0 0,1000000 0,1000000 2000000,0 0))'\n assert True == compare_one([1,geo1],[1,geo2])\n assert True == compare_one([2,geo1],[2,geo3])\n # assert False == compare_one([3,geo1],[3,geo4])\n\n #test geo_types\n geo1 = 'POLYGON((0 0,100000000 0,100000000 100000000,0 0))'\n geo2 = 'POLYGON((0 0,100000000 0,100000000 100000000.000000001,0 0))'\n geo3 = 'POLYGON((0 0,100000000 0,100000000 200000000,0 0))'\n assert True == compare_one([4,geo1],[4,geo2])\n # assert False == compare_one([5,geo1],[5,geo3])\n\n\n #test geo_collection_types\n geo1 = 'GEOMETRYCOLLECTION (POINT (2 1),LINESTRING (0 0,1 1,2 3),POLYGON((0 0,1000000 0,1000000 1000000,0 0)))'\n geo2 = 'GEOMETRYCOLLECTION (LINESTRING (0 0,1 1,2 3),POLYGON((0 0,1000000.000000000003 0,1000000 1000000,0 0)),POINT(2 1))'\n geo3 = 'GEOMETRYCOLLECTION (POINT (2 1),LINESTRING (0 0,1 2,2 3),POLYGON((0 0,2000000 0,1000000 1000000,0 0)))'\n \n assert True == compare_one([6,geo1],[6,geo2])\n # assert False == compare_one([7,geo1],[7,geo3])\n\n #test curve\n geo1 = 'CIRCULARSTRING (0 2, -1 1,0 0, 0.5 0, 1 0, 2 1, 1 2, 0.5 2, 0 2)'\n geo2 = 'CIRCULARSTRING (0 2, -1 1,0 0, 0.5 0, 1 0, 2 1, 1 2, 0.5 2, 0 2)'\n geo3 = 'CIRCULARSTRING (28 8882, -1 1,0 0, 331.5 0, 1 0, 2 1, 1 2, 0.5 2, 0 2)'\n #geo3 = 'CIRCULARSTRING (0 2, -1 1,0 0, 331.5 0, 1 0, 2 1, 1 2, 0.5 2, 0 2)' # hit assert ex!\n\n geo4 = 'COMPOUNDCURVE(CIRCULARSTRING(0 2, -1 1,1 0),CIRCULARSTRING( 1 0, 2 1, 1 2),(1 2, 0.5 2, 0 2))'\n geo5 = 'COMPOUNDCURVE(CIRCULARSTRING(0 2, -1 1,1 0),CIRCULARSTRING( 1 0, 2 1, 1 2),(1 2, 0.5 2, 0 2))'\n geo6 = 'COMPOUNDCURVE(CIRCULARSTRING(0 2, -1 1,11 0),CIRCULARSTRING( 11 0, 2 1, 1 2),(1 2, 0.5 2, 0 2))'\n \n geo7 = 'MULTICURVE ((5 5, 3 5, 3 3, 0 3), CIRCULARSTRING (0 0, 0.2 1, 0.5 1.4), COMPOUNDCURVE(LINESTRING(0 2, -1 1,1 0),CIRCULARSTRING( 1 0, 2 1, 1 2),(1 2, 0.5 2, 0 2)))'\n geo8 = 'MULTICURVE ((5 5, 3 5, 3 3, 0 3), CIRCULARSTRING (0 0, 0.2 1, 0.5 1.4), COMPOUNDCURVE(LINESTRING(0 2, -1 1,1 0),CIRCULARSTRING( 1 0, 2 1, 1 2),(1 2, 0.5 2, 0 2)))'\n geo9 = 'MULTICURVE ((5 5, 3 5, 3 3, 0 3), CIRCULARSTRING (0 0, 0.2 1, 0.5 1.4), COMPOUNDCURVE(LINESTRING(0 2, -1 1,1 0),CIRCULARSTRING( 1 0, 2 1, 1 2),(1 2, 0.5 2, 0 3)))'\n \n assert True == compare_one([8,geo1],[8,geo2])\n # assert False == compare_one([9,geo1],[9,geo3])\n assert True == compare_one([10,geo4],[10,geo5])\n # assert False == 
compare_one([11,geo4],[11,geo6])\n assert True == compare_one([12,geo7],[12,geo8])\n # assert False == compare_one([13,geo7],[13,geo9])\n\n #test surface \n geo1 = 'CURVEPOLYGON(CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),(1 1, 3 3, 3 1, 1 1))'\n geo2 = 'CURVEPOLYGON(CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),(1 1, 3 3, 3 1, 1 1))'\n geo3 = 'CURVEPOLYGON(CIRCULARSTRING(0 0, 4 0, 4 4, 0 4, 0 0),(1 1, 3 3, 3.3 1, 1 1))'\n\n geo4 = 'MULTISURFACE (CURVEPOLYGON (CIRCULARSTRING (-2 0, -1 -1, 0 0, 1 -1, 2 0, 0 2, -2 0), (-1 0, 0 0.5, 1 0, 0 1, -1 0)), ((7 8, 10 10, 6 14, 4 11, 7 8)))'\n geo5 = 'MULTISURFACE (CURVEPOLYGON (CIRCULARSTRING (-2 0, -1 -1, 0 0, 1 -1, 2 0, 0 2, -2 0), (-1 0, 0 0.5, 1 0, 0 1, -1 0)), ((7 8, 10 10, 6 14, 4 11, 7 8)))'\n geo6 = 'MULTISURFACE (CURVEPOLYGON (CIRCULARSTRING (-2 0, -1 -1, 0 0, 1 -1, 2 0, 0 2, -2 0), (-1 0, 0 0.5, 1 0, 0 1, -1 0)), ((7 8, 10 10, 6 14, 4 13, 7 8)))'\n \n update_result()\n # r = compare_results('/tmp/arctern_results/run_test_union_aggr_curve.csv', './expected/results/st_union_aggr_curve.out')\n # r = compare_results('/tmp/arctern_results/run_test_st_area.csv', './expected/results/st_area.out')\n # r = compare_results('/tmp/arctern_results/run_test_st_transform.csv', './expected/results/st_transform.out')\n # r = compare_results('/tmp/arctern_results/run_test_st_transform1.csv', './expected/results/st_transform1.out')\n # r = compare_results('/tmp/arctern_results/run_test_st_crosses.csv', './expected/results/st_crosses.out')\n # r = compare_results('/tmp/results/test_curvetoline/part-00000-034d8bf0-cc68-4195-8fcf-c23390524865-c000.json', './expected/results/st_curvetoline.out')\n # r = compare_results('/tmp/arctern_results/run_test_st_geometrytype.json', './expected/results/st_geometrytype.out')\n # exit(0)\n\n compare_all()\n assert True == compare_one([14,geo1],[14,geo2])\n # assert False == compare_one([15,geo1],[15,geo3])\n assert True == compare_one([16,geo4],[16,geo5])\n # assert False == compare_one([17,geo4],[17,geo6])\n","sub_path":"tests/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":15539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"179372338","text":"def main():\n A = list(map(int, input().split()))\n A.sort()\n # print(A)\n ans = 0\n for i in range(1, len(A)):\n ans += abs(A[i] - A[i - 1])\n print(ans)\n\n\nmain()\n","sub_path":"abc/abc103a.py","file_name":"abc103a.py","file_ext":"py","file_size_in_byte":183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"208226981","text":"file = open('table.txt', 'w')\n\ndef makeRow(i, d):\n file.write(\"\\n\")\n file.write(\" \\n\" % (i, d))\n file.write(\" $%s%d\\n\" % (i, d))\n file.write(\" \\n\" % (i, d))\n file.write(\" \\n\" % (i, d))\n file.write(\" \\n\" % (i, d))\n file.write(\"\\n\")\n\ndef separator():\n file.write(\"\\n\")\n\ndef createRegisters(letter, amount):\n for i in range(0, amount):\n makeRow(letter, i)\n separator()\n\ncreateRegisters(\"v\", 2)\ncreateRegisters(\"a\", 4)\ncreateRegisters(\"t\", 10)\ncreateRegisters(\"s\", 9)\n\n","sub_path":"scripts/table_maker.py","file_name":"table_maker.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"223109287","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: 
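
The compare.py record above routes numeric results through compare_floats(), whose definition sits above this excerpt; it also reads a global config_file that is never defined in the visible portion. A minimal sketch of what such a helper plausibly looks like — the name matches the record, but the math.isclose implementation and the 1e-8 tolerance are assumptions, not the record's actual code:

import math

# Hypothetical tolerance; the real compare.py defines its own precision.
EPSILON = 1e-8

def compare_floats(x, y, rel_tol=EPSILON):
    # Exact equality is too strict once coordinates pass through projections,
    # so compare with a relative tolerance instead.
    return math.isclose(float(x), float(y), rel_tol=rel_tol)

assert compare_floats(100000000, 100000000.000000001)
assert not compare_floats(1.0, 2.0)
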
build/bdist.macosx-10.14-x86_64/egg/irekua_permissions/annotations/votes.py\n# Compiled at: 2019-10-27 21:48:09\n# Size of source mod 2**32: 806 bytes\nfrom .annotations import view as annotation_view\n\ndef view(user, vote):\n    annotation = vote.annotation\n    return annotation_view(user, annotation)\n\n\ndef create(user, annotation):\n    if user.is_special:\n        return True\n    else:\n        item = annotation.item\n        licence = item.licence\n        licence_type = licence.licence_type\n        if not licence.is_active:\n            return True\n        if licence_type.can_vote_annotations:\n            return True\n        collection = item.collection\n        collection_type = collection.collection_type\n        if collection_type.is_admin(user):\n            return True\n        if collection.is_admin(user):\n            return True\n        return collection.has_user(user) or False\n        # NOTE: the return below is unreachable dead code left by the decompiler\n        return collection.has_permission(user, 'add_collection_annotation_vote')\n\n\ndef change(user, vote):\n    pass","sub_path":"pycfiles/irekua_permissions-0.1-py3.7/votes.cpython-37.py","file_name":"votes.cpython-37.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"202963218","text":"\ndef isSushu(a):\n    if a<=1:\n        return 0\n    else:\n        valid=1\n        for i in range(2,a):\n            if a%i==0:\n                valid=0\n                break\n        return valid\ndef isHuiwen(a):\n    stra=str(a)\n    length=len(stra)\n    valid=1 \n    for i in range(int(length/2)):\n        if stra[i]!=stra[length-1-i]:\n            valid=0\n            break\n    return valid\nn=int(input())\njudge1=isSushu(n)\njudge2=isHuiwen(n)\nwhile judge1!=1 or judge2!=1:\n    n+=1\n    judge1=isSushu(n)\n    judge2=isHuiwen(n)\nprint(n)","sub_path":"Code/CodeRecords/2244/60753/254831.py","file_name":"254831.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
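
A quick standalone check of the prime-palindrome search from record 202963218 above, with the trial divisor written as a % i and the input converted to int so it runs under Python 3. The helper names and sample inputs here are mine; this is a sketch of the same logic, not the record's code:

def is_prime(a):
    if a <= 1:
        return False
    # Trial division up to sqrt(a) is enough for primality.
    return all(a % i for i in range(2, int(a ** 0.5) + 1))

def is_palindrome(a):
    s = str(a)
    return s == s[::-1]

def next_prime_palindrome(n):
    # Scan upward until a number is both prime and a palindrome.
    while not (is_prime(n) and is_palindrome(n)):
        n += 1
    return n

assert next_prime_palindrome(2) == 2
assert next_prime_palindrome(12) == 101   # no 2-digit palindromic prime above 11
assert next_prime_palindrome(150) == 151
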
+{"seq_id":"334017618","text":"import cleaning_module as cl\nimport csv\n\n\ncsvFile = open(r'D:\mhcc_r9.csv')\n\ndaydict = {\"\": \"\", 1: \"Mon\", 2: \"Tue\", 3: \"Wed\", 4: \"Thu\", 5: \"Fri\", 6: \"Sat\", 7: \"Sun\"}\n\ncsvDReader = csv.DictReader(csvFile)\n\noutputFile = open('output.csv', 'w', newline='')\noutputWriter = csv.writer(outputFile)\nt_count = 0\ncount_gen_pairing = 0\ncount_gof = 0\ncount_day_off = 0\ncount_rule_relax = 0\ncount_time_off = 0\ncount_spec_pairing = 0\ncount_spec_time_off = 0\ncount_gen_time_off = 0\ncount_qual_time_off = 0\n\nfor row in csvDReader:\n    bid_type = row['.']\n    crewid = row['Number']\n    avoid = str(cl.get_avoid(row['Pref Type']))\n    max_times_roster = cl.setMaxTimesRoster(row['Rqd'])\n    region = cl.get_region(row['Rgn'])\n    layover = cl.get_transit(row['L/O'])\n    transit = cl.get_layover(row['Tod/Port'])\n    max_lo_nt = row['Nt']\n    bid_points = cl.setPoints((int(row['Wt'])))\n    pax = cl.get_pax(row['Px'])\n    dow_to_list = cl.get_days_from_dow(row['Remarks'])\n    day_range = list(cl.group(dow_to_list))\n    if bid_type == 'GEN_PAIRING' and len(day_range)!=0:\n        date_from = cl.conv_d(row['From'])\n        date_to = cl.conv_d(row['Until'])\n        for i in range(0,len(day_range)):\n            day_from = day_range[i][0]\n            day_to = day_range[i][1]\n            outputWriter.writerow([bid_type, crewid, date_from, date_to, avoid, daydict[day_from], daydict[day_to], max_times_roster, region, layover, max_lo_nt, transit, pax, bid_points])\n            t_count= t_count + 1\n            count_gen_pairing = count_gen_pairing + 1\n    elif bid_type == 'GEN_PAIRING' and len(day_range)==0:\n        date_from = cl.conv_d(row['From'])\n        date_to = cl.conv_d(row['Until'])\n        outputWriter.writerow([bid_type, crewid, date_from, date_to, avoid, daydict[''], daydict[''], max_times_roster, region, layover, max_lo_nt, transit, pax, bid_points])\n        t_count = t_count + 1\n        count_gen_pairing = count_gen_pairing + 1\n    elif bid_type == 'GOLDEN_DO':\n        date_from = cl.conv_d(row['From'])\n        outputWriter.writerow([bid_type, crewid, date_from, bid_points])\n        t_count = t_count + 1\n        count_gof = count_gof + 1\n    elif bid_type == 'SPEC_DO':\n        date_from = cl.conv_d(row['From'])\n        date_to = cl.conv_d(row['Until'])\n        outputWriter.writerow([bid_type, crewid, date_from, date_to, bid_points])\n        t_count = t_count + 1\n        count_day_off = count_day_off + 1\n    elif bid_type == 'GROUP_DAYS':\n        outputWriter.writerow([bid_type, crewid])\n        t_count = t_count + 1\n        count_rule_relax = count_rule_relax + 1\n    elif bid_type == 'WAIVE_WEEK':\n        outputWriter.writerow([bid_type, crewid])\n        t_count = t_count + 1\n        count_rule_relax = count_rule_relax + 1\n    elif bid_type == 'SPEC_TIMEOFF':\n        date_from = cl.conv_d(row['From'])\n        date_to = cl.get_end_date_time(row['From'], row['Durn'])\n        time_from = cl.conv_t(row['From'])\n        time_to = date_to[1]\n        outputWriter.writerow([bid_type, crewid, date_from, date_to[0], time_from, time_to, bid_points])\n        t_count = t_count + 1\n        count_spec_time_off = count_spec_time_off + 1\n    elif bid_type == 'QUAL_TIMEOFF':\n        t_count = t_count + 1\n        count_qual_time_off = count_qual_time_off + 1\n    elif bid_type == 'GEN_TIMEOFF':\n        dates = cl.get_start_end_date_from_wom(row['Remarks'])  # helper assumed to live in cleaning_module; it was called unqualified and undefined here\n\n\n        t_count = t_count + 1\n        count_gen_time_off = count_gen_time_off + 1\n    elif bid_type == 'SPEC_PAIRING':\n        t_count = t_count + 1\n        count_spec_pairing = count_spec_pairing + 1\n    else:\n        t_count = t_count + 1\n        continue\n\ncount_translated_bids = count_rule_relax + count_day_off + count_gen_pairing + count_gof\n\n\nprint(\"\\n\"+\"Bids translated : \" + str(count_translated_bids) + \" out of \" + str(t_count)+\"\\n\"\n      +\"Generic pairing bids : \"+ str(count_gen_pairing)+\"\\n\"\n      +\"Golden day off bids : \"+ str(count_gof)+\"\\n\"\n      +\"Day off bids : \"+ str(count_day_off)+\"\\n\"\n      +\"Rule relaxations : \"+ str(count_rule_relax)+\"\\n\"\n      +\"Specific Time off bids : \"+str(count_spec_time_off))\nprint(\"\\n\"+\"Bids not translated : \"+str(count_qual_time_off+count_gen_time_off+count_spec_pairing)+ \" out of \" + str(t_count)+\"\\n\"\n      +\"Specific pairing bids : \"+str(count_spec_pairing)+\"\\n\"\n      +\"Quality time off bids : \"+str(count_qual_time_off)+\"\\n\"\n      +\"Generic time off bids : \"+str(count_gen_time_off))\n\noutputFile.close()\n\n\n\n\n\n\n","sub_path":"JCR2JCB_export/tests/bids_for_jcb.py","file_name":"bids_for_jcb.py","file_ext":"py","file_size_in_byte":4440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
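
bids_for_jcb.py above maintains ten hand-incremented count_* integers across its elif ladder. A collections.Counter collapses that bookkeeping into one object; the sketch below is a refactoring suggestion with made-up sample bid types, not the record's code:

from collections import Counter

counts = Counter()

def record_bid(bid_type):
    # One counter object replaces count_gen_pairing, count_gof, count_day_off, ...
    counts['total'] += 1
    counts[bid_type] += 1

for bt in ['GEN_PAIRING', 'GEN_PAIRING', 'GOLDEN_DO', 'SPEC_DO']:
    record_bid(bt)

assert counts['total'] == 4
assert counts['GEN_PAIRING'] == 2
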
+{"seq_id":"469042786","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n@File    :   package.py\n@Time    :   2019/8/27 14:13\n@Author  :   Crisimple\n@Github :    https://crisimple.github.io/\n@Contact :   Crisimple@foxmail.com\n@License :   (C)Copyright 2017-2019, Micro-Circle\n@Desc    :   Modular test\n\"\"\"\n\nfrom selenium import webdriver\nfrom time import sleep\nfrom util.read_ini import ReadIni\nfrom basic.find_element import FindElement\nimport random\nfrom PIL import Image\nfrom api import ShowapiRequest\n\n\nclass Register(object):\n    def __init__(self, url, browser):\n        # self.driver = self.get_driver(url=url)\n        self.driver = self.get_more_driver(url=url, browser=browser)\n\n    # Start the browser and open the target test page url\n    def get_driver(self, url):\n        driver = webdriver.Chrome('../tools/chromedriver.exe')\n        driver.get(url=url)\n        driver.maximize_window()\n        return driver\n\n    # Run the tests across multiple browsers\n    def get_more_driver(self, url, browser):\n        if browser == 'chrome':\n            # Driver matching Chrome 76.0.3809.100 (64-bit)\n            driver = webdriver.Chrome('../tools/chromedriver.exe')\n        elif browser == 'firefox':\n            # Driver matching FireFox 68.0.2 (64-bit); its usage differs from the chrome driver\n            driver = webdriver.Firefox()\n        else:\n            # previously fell through with driver unbound for unknown browsers\n            raise ValueError('unsupported browser: %s' % browser)\n        driver.get(url=url)\n        driver.maximize_window()\n        return driver\n\n    # Locate the user-info field and return its element\n    def get_user_element(self, key):\n        find_element = FindElement(self.driver)\n        user_element = find_element.get_element(key=key)\n        return user_element\n\n    # Type in the user info\n    def send_user_info(self, key, data):\n        self.get_user_element(key=key).send_keys(data)\n\n    # Build a random string\n    def get_range(self):\n        number = ''.join(random.sample('abcdefg123456', 8))\n        return number\n\n    # Grab the captcha image\n    def get_captcha_image(self, file_name):\n        self.driver.save_screenshot(filename=file_name)\n        captcha_element = self.get_user_element('getcode_num')\n        left = captcha_element.location['x']\n        top = captcha_element.location['y']\n        right = captcha_element.size['width'] + left\n        height = captcha_element.size['height'] + top\n        image = Image.open(file_name)\n        img = image.crop((left, top, right, height))\n        img.save(file_name)\n\n    # Recognize the captcha text\n    def discern_captcha_image(self, file_name):\n        self.get_captcha_image(file_name=file_name)\n        # Parse the text out of the captcha image (third-party recognition API, ShowApiRequest)\n        r = ShowapiRequest(\"http://route.showapi.com/184-4\", \"48120\", \"12c017278c0845c2bcda177212d2d2ac\")\n        r.addBodyPara(\"img_base64\", \"\")\n        r.addBodyPara(\"typeId\", \"35\")\n        r.addBodyPara(\"convert_to_jpg\", \"0\")\n        r.addBodyPara(\"needMorePrecise\", \"0\")\n        r.addFilePara(\"image\", file_name)  # set when uploading the file\n        res = r.post()\n        text = res.json()[\"showapi_res_body\"][\"Result\"]\n        return text\n\n    # Main flow\n    def main(self):\n        register_nickname = self.get_range()\n        register_email = self.get_range() + '@163.com'\n        register_password = self.get_range() + '@123'\n        file_name = '../image/code_image.png'\n        # captcha_code = self.discern_captcha_image(file_name=file_name)\n        self.send_user_info('register_nickname', register_nickname)\n        self.send_user_info('register_email', register_email)\n        self.send_user_info('register_password', register_password)\n        self.send_user_info('captcha_code', 'qwex5')\n        sleep(5)\n        self.get_user_element('register-btn').click()\n\n        # Error handling: screenshot on failed registration to ease debugging\n        captcha_code_error = self.get_user_element('captcha_code_error')\n        if captcha_code_error is None:\n            print(\"......Congratulations, registration succeeded......\")\n        else:\n            self.driver.save_screenshot('../image/captcha_code_error.png')\n            sleep(5)\n            self.driver.close()\n\n\nif __name__ == \"__main__\":\n    register_url = 'http://www.5itest.cn/register'\n    browser = 'chrome'\n    r = Register(register_url, browser)\n    r.main()\n\n\n","sub_path":"common_model/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":4208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"617348509","text":"import json\nimport unittest\nfrom cluster_test_utils import start_test_cluster, run_cluster_test_with_conf \n\n\nclass DockerTest(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        start_test_cluster()\n\n    def test_with_no_cache(self):\n        conf = json.dumps({\n            'sandbox': 'docker', \n            'handler_cache_size': 0, \n            'import_cache_size': 0, \n            'cg_pool_size': 0\n        })  \n        run_cluster_test_with_conf(conf)\n\n    def test_with_handler_cache(self):\n        conf = json.dumps({\n            'sandbox': 'docker', \n            'handler_cache_size': 10000000, \n            'import_cache_size': 0, \n            'cg_pool_size': 0\n        })  \n        run_cluster_test_with_conf(conf)\n\nif __name__ 
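
get_captcha_image() in the package.py record crops the captcha out of a full-page screenshot using the element's location and size; the crop box must be (left, top, left + width, top + height). A standalone Pillow illustration — the 80x30 element geometry and the in-memory "screenshot" are made up:

from PIL import Image

# Fake screenshot and element geometry, as Selenium would report them.
screenshot = Image.new('RGB', (800, 600), 'white')
location = {'x': 100, 'y': 200}
size = {'width': 80, 'height': 30}

left = location['x']
top = location['y']
right = left + size['width']      # right edge, not the width alone
bottom = top + size['height']     # bottom edge, not the height alone

captcha = screenshot.crop((left, top, right, bottom))
assert captcha.size == (80, 30)
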
== '__main__':\n unittest.main()\n\n","sub_path":"testing/integration-tests/docker_test.py","file_name":"docker_test.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"491275213","text":"# anet.py\n#\n# Raft network layer. Allows messages to be sent/received between\n# different servers. This is only for the Raft Server-Server communication.\n# Not for application level networking. \n\nimport logging\nfrom curio.socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR\nimport curio\n\nfrom . import config\n\nasync def send_message_size(sock, nbytes):\n sz = b'%10d' % nbytes\n await sock.sendall(sz)\n \nasync def send_message(sock, msg):\n await send_message_size(sock, len(msg))\n await sock.sendall(msg)\n\nasync def recv_exactly(sock, nbytes):\n parts = [ ]\n while nbytes > 0:\n part = await sock.recv(nbytes)\n if not part:\n raise ConnectionError(\"Connection Closed\")\n parts.append(part)\n nbytes -= len(part)\n return b''.join(parts)\n\nasync def recv_message_size(sock):\n sz = await recv_exactly(sock, 10)\n return int(sz)\n\nasync def recv_message(sock):\n # Need to know how big the message is in order to get it\n size = await recv_message_size(sock)\n return (await recv_exactly(sock, size))\n\n\n\nclass AsyncRaftNetBase:\n '''\n Abstract base class for networking layer\n '''\n async def send(self, dest, msg):\n '''\n Send a message to a specified destination. Does not wait for the\n message to be delivered. Does not guarantee message delivery.\n Returns immediately.\n '''\n raise NotImplementedError()\n\n async def recv(self):\n '''\n Receive a message from any server. Waits until a message arrives.\n Does not include any information about the message sender. If this\n is desired, that information should be encoded as part of the message\n payload itself.\n '''\n raise NotImplementedError()\n \n async def start(self):\n '''\n Start the networking layer. If there are any background servers or\n other things that need to start in the background, launch them here.\n '''\n raise NotImplementedError()\n\nclass AsyncTCPRaftNet(AsyncRaftNetBase):\n def __init__(self, address):\n self.address = address\n self.numservers = len(config.SERVERS)\n self._outgoing = {n : curio.Queue() for n in config.SERVERS }\n self._socks = { n: None for n in config.SERVERS } # The other servers\n self.server_sock = None\n self._msgqueue = curio.Queue() # Incoming messages\n self.log = logging.getLogger(f'net.{address}')\n\n async def send(self, dest, msg):\n await self._outgoing[dest].put(msg)\n\n async def _sender(self, dest):\n while True:\n msg = await self._outgoing[dest].get()\n self.log.debug(\"Sending %r to %s\", msg, dest)\n if self._socks[dest] is None:\n try:\n self.log.debug(\"Trying connection to: %s - %s\", dest, config.SERVERS[dest])\n self._socks[dest] = socket(AF_INET, SOCK_STREAM)\n # Discussion: Connecting to a remote machine might take a long time.\n # Is send() supposed to wait for this to happen? Or does send() try\n # to return as fast as possible? \n await self._socks[dest].connect(config.SERVERS[dest]) # Sore point\n self.log.info(\"Connected to: %s - %s\", dest, config.SERVERS[dest])\n except IOError as err:\n self._socks[dest] = None\n self.log.debug(\"Connection to: %s failed\", exc_info=True)\n if self._socks[dest]:\n try:\n # Discussion. You send a message, but send() blocks due to TCP flow\n # control or some other buffering issue. 
Is this supposed to happen\n # or should send() return immediately?\n await send_message(self._socks[dest], msg)\n except IOError as err:\n self.log.debug(\"Send to %s failed\", dest, exc_info=True)\n await self._socks[dest].close()\n self._socks[dest] = None\n else:\n self.log.info(\"Server %s offline\", dest)\n\n async def recv(self):\n return await self._msgqueue.get()\n \n async def connection_server(self):\n '''\n Thread that runs in the background listening for connections\n from the other Raft servers. Delivers messages to internal message queue.\n '''\n sock = socket(AF_INET, SOCK_STREAM)\n sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, True)\n sock.bind(config.SERVERS[self.address])\n sock.listen(1)\n self.log.info(\"Server %s running on %s\", self.address, sock)\n while True:\n client, addr = await sock.accept()\n self.log.info(\"Received connection from: %s\", addr)\n await curio.spawn(self.handle_client, client, daemon=True)\n \n async def handle_client(self, sock):\n async with sock:\n while True:\n msg = await recv_message(sock)\n self.log.debug(\"Received message: %r\", msg)\n await self._msgqueue.put(msg)\n\n async def start(self):\n await curio.spawn(self.connection_server, daemon=True)\n for n in config.SERVERS:\n await curio.spawn(self._sender, n, daemon=True)\n\n \n\n\n\n \n \n","sub_path":"dabeaz/rafto/anet.py","file_name":"anet.py","file_ext":"py","file_size_in_byte":5397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"543111663","text":"from keras import backend as K\r\nfrom keras.layers import Activation, Add, Multiply, UpSampling2D, UpSampling3D, Cropping2D, Cropping3D, Lambda\r\nfrom keras.layers.convolutional import Conv2D, Conv2DTranspose\r\nfrom keras.layers.convolutional import Conv3D, Conv3DTranspose\r\nfrom keras.layers.normalization import BatchNormalization\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nK.set_image_dim_ordering('th')\r\n\r\n\r\ndef grid_attention(dimension, input, g_f, gate, attn_opt, mode):\r\n\r\n kernel_size = (1, 1) if dimension == 2 else (1, 1, 1)\r\n sub_sample_factor = (2, 2) if dimension == 2 else (2, 2, 2)\r\n\r\n channel_axis = 1 if K.image_data_format() == \"channels_first\" else -1\r\n gate_channels = gate._keras_shape[channel_axis]\r\n #input_channels = input._keras_shape[channel_axis]\r\n #g_f_channels = g_f._keras_shape[channel_axis]\r\n g_f_crop_size = np.int((g_f._keras_shape[2] * sub_sample_factor[0] - g_f._keras_shape[2]) / 2)\r\n\r\n # Define the operation\r\n if mode == 'concatenation':\r\n return _concatenation(dimension, input, g_f, gate, gate_channels, g_f_crop_size,\r\n kernel_size, sub_sample_factor, 'relu', attn_opt)\r\n # elif mode == 'concatenation_debug':\r\n # return _concatenation(dimension, input, g_f, in_channels, out_channels, kernel_size, sub_sample_factor,\r\n # sub_sample_kernel_size, 'softplus')\r\n # elif mode == 'concatenation_residual':\r\n # return _concatenation_residual(dimension, input, g_f, in_channels, out_channels, kernel_size,\r\n # sub_sample_factor, sub_sample_kernel_size, 'relu')\r\n else:\r\n raise NotImplementedError('Unknown operation function.')\r\n\r\n\r\ndef _concatenation(dimension, input, g_f, gate, gate_channels, g_f_crop_size, kernel_size, sub_sample_factor,\r\n activation, attn_opt):\r\n\r\n if dimension == 2:\r\n if attn_opt == 1:\r\n # Theta^T * x_ij + Phi^T * gating_signal + bias\r\n theta_x = Conv2D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(input)\r\n phi_g_f = 
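
anet.py frames every Raft message with a 10-byte ASCII length header (send_message_size / recv_message_size). The same wire format, shown with plain blocking sockets so it runs without curio; the function names mirror the record's async versions, but these synchronous bodies are a sketch:

import socket

def send_message(sock, msg: bytes):
    sock.sendall(b'%10d' % len(msg))   # fixed-width, space-padded length header
    sock.sendall(msg)

def recv_exactly(sock, nbytes: int) -> bytes:
    # recv() may return short reads; loop until the full count arrives.
    parts = []
    while nbytes > 0:
        part = sock.recv(nbytes)
        if not part:
            raise ConnectionError('Connection Closed')
        parts.append(part)
        nbytes -= len(part)
    return b''.join(parts)

def recv_message(sock) -> bytes:
    size = int(recv_exactly(sock, 10))
    return recv_exactly(sock, size)

a, b = socket.socketpair()
send_message(a, b'AppendEntries')
assert recv_message(b) == b'AppendEntries'
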
Conv2D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(g_f)\r\n phi_g_f_up = UpSampling2D(size=sub_sample_factor, interpolation='bilinear')(phi_g_f)\r\n phi_g_f_up_crop = get_cropping_layer(dimension, phi_g_f_up, crop_size=(g_f_crop_size, g_f_crop_size))\r\n\r\n f = Activation(activation)(Add()([theta_x, phi_g_f_up_crop]))\r\n\r\n elif attn_opt == 2:\r\n theta_x = Conv2D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(input)\r\n phi_g_f = Conv2D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(g_f)\r\n phi_gate = Conv2D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(gate)\r\n phi_g_f_up = UpSampling2D(size=sub_sample_factor, interpolation='bilinear')(phi_g_f)\r\n phi_g_f_up_crop = get_cropping_layer(dimension, phi_g_f_up, crop_size=(g_f_crop_size, g_f_crop_size))\r\n phi_gate_up = UpSampling2D(size=sub_sample_factor, interpolation='bilinear')(phi_gate)\r\n\r\n f = Activation(activation)(Add()([theta_x, phi_g_f_up_crop, phi_gate_up]))\r\n\r\n psi_f = Conv2D(1, kernel_size=kernel_size, strides=kernel_size, padding='same')(f)\r\n sigm_psi_f = Activation('sigmoid')(psi_f)\r\n\r\n #y = Multiply()([sigm_psi_f, input])\r\n y = Multiply()([sigm_psi_f, theta_x])\r\n\r\n elif dimension == 3:\r\n if attn_opt == 1:\r\n # Theta^T * x_ij + Phi^T * gating_signal + bias\r\n theta_x = Conv3D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(input)\r\n phi_g_f = Conv3D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(g_f)\r\n phi_g_f_up = UpSampling3D(size=sub_sample_factor)(phi_g_f)\r\n phi_g_f_up_crop = get_cropping_layer(dimension, phi_g_f_up, crop_size=(g_f_crop_size, g_f_crop_size))\r\n\r\n f = Activation(activation)(Add()([theta_x, phi_g_f_up_crop]))\r\n\r\n elif attn_opt == 2:\r\n theta_x = Conv3D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(input)\r\n phi_g_f = Conv3D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(g_f)\r\n phi_gate = Conv3D(gate_channels, kernel_size=kernel_size, strides=kernel_size, padding='same')(gate)\r\n phi_g_f_up = UpSampling3D(size=sub_sample_factor)(phi_g_f)\r\n phi_g_f_up_crop = get_cropping_layer(dimension, phi_g_f_up, crop_size=(g_f_crop_size, g_f_crop_size))\r\n phi_gate_up = UpSampling3D(size=sub_sample_factor)(phi_gate)\r\n\r\n f = Activation(activation)(Add()([theta_x, phi_g_f_up_crop, phi_gate_up]))\r\n\r\n psi_f = Conv3D(1, kernel_size=kernel_size, strides=kernel_size, padding='same')(f)\r\n sigm_psi_f = Activation('sigmoid')(psi_f)\r\n\r\n #y = Multiply()([sigm_psi_f, input])\r\n y = Multiply()([sigm_psi_f, theta_x])\r\n\r\n\r\n else:\r\n raise NotImplemented\r\n\r\n return y\r\n#\r\n#\r\n# def UpSampling3DBicubic(stride, **kwargs):\r\n# def layer(x):\r\n# input_shape = K.int_shape(x)\r\n# output_shape = (stride[0] * input_shape[1], stride[1] * input_shape[2], stride[2] * input_shape[3])\r\n# return tf.image.resize_bicubic(x, output_shape, align_corners=True)\r\n# return Lambda(layer, **kwargs)\r\n\r\n\r\ndef get_cropping_layer(dimension, input, crop_size=(16, 16)):\r\n cropping_param = (crop_size, crop_size) if dimension == 2 else (crop_size, crop_size, crop_size)\r\n\r\n if dimension == 2 :\r\n return Cropping2D(cropping=cropping_param)(input)\r\n else :\r\n return 
Cropping3D(cropping=cropping_param)(input)","sub_path":"architectures/attention_gates_v2.py","file_name":"attention_gates_v2.py","file_ext":"py","file_size_in_byte":5822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"525796722","text":"from geoalchemy2 import Geometry\nfrom geoalchemy2.shape import from_shape, to_shape\nfrom shapely.geometry import LineString, Point, mapping\nfrom sqlalchemy import Column, ForeignKey, Table, engine_from_config\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import configure_mappers, relationship, sessionmaker\nfrom sqlalchemy.schema import MetaData, UniqueConstraint\nfrom sqlalchemy.types import PickleType, String\nfrom zope.sqlalchemy import register as register_transaction_listener\n\nfrom .constants import (\n ASSET_TYPE_BY_ID,\n RECORD_ID_LENGTH,\n RECORD_RETRY_COUNT)\nfrom .exceptions import DatabaseRecordError\nfrom .macros.security import make_random_string\n\n\nCLASS_REGISTRY = {}\nmetadata = MetaData(naming_convention={\n 'ix': 'ix_%(column_0_label)s',\n 'uq': 'uq_%(table_name)s_%(column_0_name)s',\n 'ck': 'ck_%(table_name)s_%(constraint_name)s',\n 'fk': 'fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s',\n 'pk': 'pk_%(table_name)s',\n})\nBase = declarative_base(class_registry=CLASS_REGISTRY, metadata=metadata)\nasset_content = Table(\n 'asset_content', Base.metadata,\n Column('parent_asset_id', String, ForeignKey('asset.id')),\n Column('child_asset_id', String, ForeignKey('asset.id')))\nasset_connection = Table(\n 'asset_connection', Base.metadata,\n Column('left_asset_id', String, ForeignKey('asset.id')),\n Column('right_asset_id', String, ForeignKey('asset.id')))\n\n\nclass RecordMixin(object):\n\n id = Column(String, primary_key=True)\n id_length = RECORD_ID_LENGTH\n\n @classmethod\n def make_unique_record(Class, database, retry_count=RECORD_RETRY_COUNT):\n # Adapted from invisibleroads-records\n count = 0\n id_length = Class.id_length\n while count < retry_count:\n record = Class(id=make_random_string(id_length))\n database.add(record)\n try:\n database.flush()\n except IntegrityError:\n database.rollback()\n else:\n break\n count += 1\n else:\n raise DatabaseRecordError(\n f'could not make unique {Class.__tablename__}')\n return record\n\n\nclass Asset(RecordMixin, Base):\n\n __tablename__ = 'asset'\n utility_id = Column(String)\n name = Column(String)\n type_id = Column(String)\n children = relationship(\n 'Asset', secondary=asset_content,\n primaryjoin='asset_content.c.parent_asset_id == Asset.id',\n secondaryjoin='asset_content.c.child_asset_id == Asset.id',\n backref='parents')\n connections = relationship(\n 'Asset', secondary=asset_connection,\n primaryjoin='asset_connection.c.left_asset_id == Asset.id',\n secondaryjoin='asset_connection.c.right_asset_id == Asset.id')\n _geometry = Column(Geometry(management=True))\n attributes = Column(PickleType)\n\n def __init__(self, **kwargs):\n if 'geometry' in kwargs:\n kwargs['_geometry'] = from_shape(kwargs.pop('geometry'))\n super(Asset, self).__init__(**kwargs)\n\n @property\n def is_locatable(self):\n return ASSET_TYPE_BY_ID[self.type_id[0]].get('locatable', False)\n\n @property\n def location(self):\n if self._geometry is None:\n return\n return self.geometry.coords[0]\n\n @location.setter\n def location(self, location):\n if location is None:\n self._geometry = None\n return\n\n point = Point(location)\n self.geometry = point\n\n for parent in 
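
The attn_opt=1 branch of _concatenation() in attention_gates_v2.py computes sigmoid(psi(relu(theta_x + phi_g))) and multiplies the result into theta_x. The arithmetic is easier to see without the Keras layers; a NumPy sketch on a single-channel 2-D map, with the 1x1 conv psi replaced by an affine stand-in (all shapes and constants here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(8, 8))         # skip-connection features (theta_x, one channel)
g = rng.normal(size=(8, 8))         # upsampled + cropped gating signal (phi_g)

f = np.maximum(x + g, 0.0)          # relu(theta_x + phi_g)
psi = 0.5 * f - 0.1                 # affine stand-in for the 1x1 conv psi_f
alpha = 1.0 / (1.0 + np.exp(-psi))  # sigmoid -> attention coefficients in (0, 1)
y = alpha * x                       # gated output, as in Multiply()([sigm_psi_f, theta_x])

assert y.shape == x.shape and np.all((alpha > 0) & (alpha < 1))
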
self.parents:\n if parent.type_id == 'l':\n update_line_geometry(parent)\n continue\n if parent._geometry is not None:\n continue\n parent.geometry = point\n\n for child in self.children:\n if child._geometry is not None:\n continue\n child.geometry = point\n\n @property\n def geometry(self):\n if self._geometry is None:\n return\n return to_shape(self._geometry)\n\n @geometry.setter\n def geometry(self, geometry):\n if geometry is not None:\n geometry = from_shape(geometry)\n self._geometry = geometry\n\n def add_child(self, asset):\n if self == asset:\n return\n\n if asset in self.children:\n return\n self.children.append(asset)\n\n # If the child has no location,\n if asset.location is None:\n # Give parent location to child\n asset.location = self.location\n # If the child has a location but parent has no location,\n elif self.location is None:\n # Give location to parent\n self.location = asset.location\n\n if 'l' == self.type_id:\n update_line_geometry(self)\n\n def add_connection(self, asset):\n if self == asset:\n return\n if asset not in self.connections:\n self.connections.append(asset)\n if self not in asset.connections:\n asset.connections.append(self)\n\n def remove_child(self, asset):\n if asset in self.children:\n self.children.remove(asset)\n\n if 'l' == self.type_id:\n update_line_geometry(self)\n\n def remove_connection(self, asset):\n if asset in self.connections:\n self.connections.remove(asset)\n if self in asset.connections:\n asset.connections.remove(self)\n\n def serialize(self):\n d = dict(self.attributes or {}, **{\n 'id': self.id,\n 'typeId': self.type_id,\n 'name': self.name,\n 'connectedIds': [_.id for _ in self.connections],\n 'parentIds': [_.id for _ in self.parents],\n 'childIds': [_.id for _ in self.children],\n })\n if self._geometry is not None:\n d['location'] = self.location\n d['geometry'] = mapping(self.geometry)\n return d\n\n def __repr__(self):\n return f''\n\n __table_args__ = (\n UniqueConstraint(\n 'utility_id', 'name', name='unique_utility_asset_name'),\n )\n\n\ndef includeme(config):\n settings = config.get_settings()\n settings['tm.manager_hook'] = 'pyramid_tm.explicit_manager'\n config.include('pyramid_tm')\n config.include('pyramid_retry')\n database_engine = get_database_engine(settings)\n get_database_session = define_get_database_session(database_engine)\n config.add_request_method(\n lambda r: get_transaction_manager_session(get_database_session, r.tm),\n 'db', reify=True)\n\n\ndef get_database_engine(settings, prefix='sqlalchemy.'):\n engine = engine_from_config(settings, prefix)\n if settings[prefix + 'url'].startswith('sqlite'):\n load_spatialite_sqlite_extension(engine)\n return engine\n\n\ndef define_get_database_session(database_engine):\n get_database_session = sessionmaker()\n get_database_session.configure(bind=database_engine)\n return get_database_session\n\n\ndef get_transaction_manager_session(get_database_session, transaction_manager):\n database_session = get_database_session()\n register_transaction_listener(\n database_session, transaction_manager=transaction_manager)\n return database_session\n\n\ndef load_spatialite_sqlite_extension(engine):\n from sqlalchemy.event import listen\n from sqlalchemy.sql import func, select\n\n def load_spatialite(api_connection, connection_record):\n api_connection.enable_load_extension(True)\n api_connection.load_extension('mod_spatialite.so')\n\n listen(engine, 'connect', load_spatialite)\n engine_connection = engine.connect()\n 
engine_connection.execute(select([func.InitSpatialMetaData()]))\n engine_connection.close()\n return engine\n\n\ndef update_line_geometry(line_asset):\n line_coordinates = []\n for pole_asset in line_asset.children:\n location = pole_asset.location\n if location is None:\n continue\n line_coordinates.append(location)\n coordinate_count = len(line_coordinates)\n if coordinate_count == 0:\n geometry = None\n elif coordinate_count == 1:\n geometry = Point(line_coordinates)\n else:\n geometry = LineString(line_coordinates)\n line_asset.geometry = geometry\n\n\nconfigure_mappers()\n","sub_path":"asset_tracker/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"447928292","text":"# this program assigns railroads to links using the railroads online.\n# uses completely within buffer type\n\nimport arcpy\nimport pandas\nfrom simpledbf import Dbf5\n\narcpy.env.workspace = r\"C:\\GIS\"\narcpy.env.overwriteOutput = True # overwrite files if its already present\n\nbufferDistance = \"3 Miles\"\n\nrootDirectory = 'Z:\\\\Thesis\\\\Coal Data\\\\downloadedonline\\\\'\n\nlistRRs = ['UP', \"BNSF\", \"NS\", \"KCS\", \"CSXT\", \"CP\", \"CN\"]\nRRCols = ['RR1', 'RR2', 'RR3', 'RR4', 'RR5', 'RR6', 'RR7']\nRRcodes = [802, 777, 555, 400, 712, 105, 103]\n\n# Local variables:\nNewLinks = \"M:\\\\RAIL\\\\OngoingWork\\\\NewLinks.shp\"\n# railroads\nNewLinksDbf = \"M:\\\\RAIL\\\\OngoingWork\\\\NewLinks.dbf\"\nnetwork = Dbf5(NewLinksDbf).to_dataframe()\nnetwork = network[RRCols]\n\n\ndef AddColumn(colName):\n arcpy.DeleteField_management(NewLinks, colName)\n arcpy.AddField_management(NewLinks, colName, \"Double\")\n i = 0\n with arcpy.da.UpdateCursor(NewLinks, colName) as cursor:\n for row in cursor:\n row[0] = network[colName][i]\n i += 1\n cursor.updateRow(row)\n print(\"{0} was updated\".format(colName))\n\n\ndef getnext(j, i):\n if network[RRCols[j]][i] == 0:\n # print(\"{0},{1} = 0\".format(j,i))\n if j < len(RRCols) - 1:\n return (getnext(j + 1, i))\n else:\n return 0\n else:\n dumm = network[RRCols[j]][i]\n network[RRCols[j]][i] = 0\n # print (\"found greater {0}\".format(dumm))\n return dumm\n\n\nfor i in range(len(listRRs)):\n print(\"Finding links for {0}...\".format(listRRs[i]))\n railroad = 'Z:\\\\Thesis\\\\Coal Data\\\\downloadedonline\\\\' + listRRs[i] + '.shp'\n Buffer = \"C:\\\\GIS\\\\Buffer.shp\"\n # create buffer layerProcess: Buffer\n arcpy.Buffer_analysis(railroad, Buffer, bufferDistance, \"FULL\", \"ROUND\", \"NONE\", \"\", \"PLANAR\")\n # select on NewLinks\n arcpy.MakeFeatureLayer_management(NewLinks, 'NewLinks_lyr')\n arcpy.SelectLayerByLocation_management('NewLinks_lyr', \"COMPLETELY_WITHIN\", Buffer, \"\", \"NEW_SELECTION\",\n \"NOT_INVERT\")\n count = arcpy.GetCount_management('NewLinks_lyr')\n print(\"Total number of selected links = {0}\".format(count))\n arcpy.CalculateField_management('NewLinks_lyr', RRCols[i], RRcodes[i], \"PYTHON\")\n print(\"Values for {0} railroad updated\".format(listRRs[i]))\n print(\"Process Completed Successfully\")\n\n# left justify all the RRs\nfor i in range(len(network)):\n for j in range(len(RRCols)):\n if (network[RRCols[j]][i] == 0):\n network[RRCols[j]][i] = getnext(j, i)\n if network[RRCols[j]][i] == 0:\n break\n # print(\"obtained value from getnext = {0}\".format(network[RRCols[j]][i]))\n\n print(\"Row {0} completed\".format(i))\n\n# write the new columns to the dbf file\nfor col in RRCols:\n 
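
update_line_geometry() in the asset-tracker models collapses a line asset's geometry to None, a Point, or a LineString depending on how many located children remain. The same rule in isolation, with coordinate tuples standing in for child assets (Shapely is already an import of that record):

from shapely.geometry import LineString, Point

def line_geometry(locations):
    # Mirrors update_line_geometry(): drop unlocated children, then pick the
    # geometry type from how many coordinates are left.
    locations = [loc for loc in locations if loc is not None]
    if not locations:
        return None
    if len(locations) == 1:
        return Point(locations[0])
    return LineString(locations)

assert line_geometry([None]) is None
assert isinstance(line_geometry([(0, 0)]), Point)
assert isinstance(line_geometry([(0, 0), (1, 1)]), LineString)
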
AddColumn(col)\n","sub_path":"others/old_scripts/assign_railroads_to_links.py","file_name":"assign_railroads_to_links.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"247235271","text":"import os\nimport numpy as np\nfrom thetanmm.model import MontbrioPazoRoxin\nfrom tvb.simulator.lab import *\nfrom tvb.basic.neotraits.api import HasTraits, Attr, Final\n\nimport itertools\n\ndef configure_sim(\n dataset, \n G, \n cut_in=10000, \n sim_len=600e3, \n conn_speed=2.0,\n dt=0.01,\n tavg=True,\n bold=True,\n eeg=False,\n raw=False,\n seed=42,\n nsigma=0.01,\n eta=-4.6,\n J=14.5,\n Delta=0.7,\n tau=1,\n initial_conditions=None,\n mult_noise_eq=None\n):\n \"\"\" Configures simulation with Monbrio model\n\n Parameters\n dataset dictionary of TVB components; obligatory:`connectivity`, other: `eeg`\n G global coupling scaling\n cut_in length [ms] of the discarded initial transient\n sim_len length [ms] of simulation\n conn_speed connection speed \n dt integration step\n tavg include time average monitor\n bold include BOLD monitor\n eeg include EEG monitor, requires the monitor to be provided in the dataset\n raw include RAW monitor\n seed random number generator seed\n nsigma sigma for the noise\n eta Montbrio model parameter\n J Montbrio model parameter\n Delta Montbrio model parameter\n\n Returns:\n sim configured simulator instance\n \"\"\"\n\n conn = dataset[\"connectivity\"]\n conn.speed = np.array([conn_speed]) \n np.fill_diagonal(conn.weights, 0.)\n conn.weights = conn.weights/np.max(conn.weights)\n conn.configure()\n\n mont_model = MontbrioPazoRoxin(\n eta = np.r_[eta],\n J = np.r_[J],\n Delta = np.r_[Delta],\n tau = np.r_[tau],\n )\n con_coupling = coupling.Scaling(a=np.array([G]))\n\n nsig = np.r_[nsigma, nsigma*2] if np.isscalar(nsigma) else nsigma\n\n if mult_noise_eq is not None:\n hiss = noise.Multiplicative(\n nsig=nsig, \n noise_seed=seed,\n b=mult_noise_eq\n )\n else:\n hiss = noise.Additive(nsig=nsig, noise_seed=seed)\n\n integrator = integrators.HeunStochastic(dt=dt, noise=hiss)\n\n\n\n mntrs = []\n if tavg:\n mntrs.append(monitors.TemporalAverage(period=1.))\n if bold:\n BOLD_period=2000\n mntrs.append( monitors.Bold(period=BOLD_period) )\n if eeg:\n eeg_period = 1000/256.\n eeg_monitor = dataset[\"eeg\"]\n eeg_monitor.period = eeg_period\n mntrs.append(eeg_monitor)\n if raw:\n mntrs.append(monitors.Raw())\n\n\n sim = simulator.Simulator(model=mont_model,\n connectivity=conn,\n coupling=con_coupling,\n conduction_speed=conn_speed,\n integrator=integrator,\n monitors=mntrs,\n simulation_length=sim_len + cut_in,\n initial_conditions=initial_conditions\n )\n sim.configure()\n\n return sim\n\ndef generate_initial_conditions_array(sim):\n sim.connectivity.set_idelays(dt=sim.integrator.dt)\n horizon = sim.horizon\n nvar = sim.model.nvar\n nnodes = sim.number_of_nodes\n nmodes = sim.model.number_of_modes\n\n return np.zeros( (horizon, nvar, nnodes, nmodes) )\n\ndef generate_rescaled_initial_conditions(sim, state_variable_range):\n \"\"\"\n Parameters\n sim: pre-configured simulator instance (use sim.configure())\n state_variable_range: ranges to be used instead of `model.state_variable_range`\n\n \"\"\"\n sim.connectivity.set_idelays(dt=sim.integrator.dt)\n horizon = sim.horizon\n nvar = sim.model.nvar\n nnodes = sim.number_of_nodes\n nmodes = sim.model.number_of_modes\n\n initial_conditions = generate_initial_conditions_array(sim)\n\n #var_shape = ( horizon, 1, nnodes, nmodes)\n 
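
getnext() in assign_railroads_to_links.py left-justifies each row's nonzero railroad codes, but it writes through chained indexing (network[col][i] = ...), which pandas can apply to a temporary copy. The compaction is a few lines of plain Python, and .at is the safe write idiom; the column set is shortened to three in this sketch:

import pandas as pd

df = pd.DataFrame({'RR1': [0, 802], 'RR2': [555, 0], 'RR3': [0, 103]})

for i in df.index:
    codes = [c for c in df.loc[i, ['RR1', 'RR2', 'RR3']] if c != 0]
    codes += [0] * (3 - len(codes))          # left-justify, pad with zeros
    for col, val in zip(['RR1', 'RR2', 'RR3'], codes):
        df.at[i, col] = val                  # .at avoids chained-assignment pitfalls

assert df.loc[0].tolist() == [555, 0, 0]
assert df.loc[1].tolist() == [802, 103, 0]
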
var_shape = list(initial_conditions.shape)\n var_shape[1] = 1\n for i, var in enumerate(sim.model.state_variables):\n low, high = state_variable_range[var]\n initial_conditions[:,[i],:,:] = np.random.uniform(low=low, high=high, size=(var_shape))\n\n return initial_conditions\n\ndef rescale_nsigma(sig, tau):\n sig_r = sig * 1/(tau**4)\n sig_V = 2 * sig * 1/(tau**2)\n return np.r_[sig_r, sig_V]\n\n\ndef save_sim_output(sim, outputs, out_path, reduced_precision=True):\n keys = [(m.__class__.__name__ + \"_time\", m.__class__.__name__ + \"_data\") for m in sim.monitors]\n flat_outs = dict(\n zip(\n itertools.chain(*keys),\n itertools.chain(*outputs)\n )\n )\n if reduced_precision:\n for k,val in flat_outs.items():\n flat_outs[k] = val.astype(np.float32)\n np.savez( out_path, **flat_outs )\n\ndef run_sim(sim, out_path, cut_in, sim_len):\n if cut_in > 0:\n _ = sim.run(simulation_length=cut_in)\n\n outputs = sim.run(simulation_length=sim_len)\n save_sim_output(sim, outputs, out_path)\n\ndef run_sim_variable_parameter(sim, param_vals, sim_len):\n \"\"\"\n sim: simulator instance\n param_vals: values for variable parameters {var: vals} where `vals` is an\n array of len `n` for all `var`\n sim_len: total length of simulation (will be divided into `n` segments\n \"\"\"\n output = []\n for _ in sim.monitors:\n output.append( ([], []) )\n\n\n n_segs = len(list(param_vals.values())[0])\n seg_len = sim_len / n_segs\n\n for n in range(n_segs):\n for var, vals in param_vals.items():\n setattr(sim.model, var, np.r_[vals[n]])\n\n partial_output = sim.run(simulation_length=seg_len)\n\n for j, (times,data) in enumerate(partial_output):\n output[j][0].append(times)\n output[j][1].append(data)\n\n\n np_output = []\n for time, data in output:\n np_output.append( ( np.concatenate(time), np.concatenate(data)))\n\n return np_output\n\ndef generate_Js(sigma_J, N, J0):\n mu_J = sigma_J * np.sqrt(2/np.pi)\n return J0 + np.abs(np.random.normal(scale=sigma_J,size=N))- mu_J\n\n\nclass LinearNorm(equations.TemporalApplicableEquation):\n equation = Final(\n label=\"Equation\",\n default=\"a * abs(x_0 - var) + b\",\n doc=\"\"\":math:`result = a * (|x_0-x|) + b`\"\"\")\n\n parameters = Attr(\n field_type=dict,\n label=\"Parameters\",\n default=lambda: {\"a\": 1.0, \"b\": 0.0, \"x_0\":0})\n\ndef MontbrioPazoRoxin_up_state(tau, Delta, J, eta):\n r_0 = sorted(\n np.roots(\n p=np.r_[\n tau**2 * np.pi**2,\n -J * tau,\n -eta,\n Delta/(4*np.pi**2*tau**2),\n 0\n ]\n )\n )[-1]\n V_0 = -Delta/(2*np.pi*tau*r_0)\n\n return r_0, V_0\n\n\n\n\n \n","sub_path":"src/old/untitled folder/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":6903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"170702555","text":"NAMES_CONFIG = {\n 'dwarf': {\n 'given_name': {\n 'male': [\n ['B', 'L', 'D', 'M', 'F', 'N', 'G', 'R', 'Gl', 'S', 'H', 'T', 'K', 'V'],\n ['a', 'o', 'e', 'oi', 'i', 'u'],\n ['bur', 'mli', 'fur', 'nar', 'gan', 'nus', 'gnus', 'rin', 'gnar', 'ran', 'li', 'sin',\n 'lin',\n 'sil', 'lir', 'sur']\n ],\n 'female': [\n ['Al', 'L', 'Br', 'M', 'C', 'Mer', 'Cl', 'N', 'D', 'R', 'El', 'S', 'Gw', 'Ys', 'J'],\n ['a', 'o', 'ae', 'u', 'e', 'y', 'ea', 'w', 'i'],\n ['brylla', 'ngwen', 'cla', 'niver', 'dda', 'noic', 'll', 'ra', 'lla', 'rka', 'llyra',\n 'ryan',\n 'lonna', 'ssa', 'lyan', 'vyan', 'na']\n ]\n },\n 'surname': [\n ['Amber', 'Boulder', 'Demon', 'Foe', 'Hammer', 'Light', 'Pious', 'Spiced', 'Under',\n 'Amethyst',\n 'Brew', 'Diamond', 'Forge', 'Hard', 'Long', 'Plate', 
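
MontbrioPazoRoxin_up_state() in simulation.py roots a quartic in r to find the up state. Plugging the root back into the MPR nullclines is a cheap sanity check. The coefficient vector below is derived from the standard model equations (tau*dV/dt = V^2 + eta + J*tau*r - (pi*tau*r)^2 with V_0 = -Delta/(2*pi*tau*r_0)) and differs from the record's np.r_[...] in the last two entries, so treat this as a cross-check to run, not a verdict on which form is intended:

import numpy as np

tau, Delta, J, eta = 1.0, 0.7, 14.5, -4.6

# r-nullcline: 0 = Delta/(pi*tau) + 2*r*V   =>   V0 = -Delta/(2*pi*tau*r0)
# V-nullcline: 0 = V0**2 + eta + J*tau*r0 - (pi*tau*r0)**2
# Eliminating V0 yields a quartic in r0:
coeffs = np.r_[np.pi**2 * tau**2, -J * tau, -eta, 0.0, -Delta**2 / (4 * np.pi**2 * tau**2)]

roots = np.roots(coeffs)
real_roots = roots.real[np.abs(roots.imag) < 1e-9]
r0 = real_roots.max()                       # up state = largest real root
V0 = -Delta / (2 * np.pi * tau * r0)

residual = V0**2 + eta + J * tau * r0 - (np.pi * tau * r0)**2
print(r0, V0, residual)                     # residual ~ 0 for a true fixed point
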
'Spiked', 'Valor', 'Amulet', 'Brick',\n 'Dim', 'Forked', 'Hearth', 'Lore', 'Plague', 'Spirit', 'War', 'Anvil', 'Bright', 'Divine',\n 'Foul', 'Heavy', 'Mage', 'Quartz', 'Steel', 'Warm', 'Ash', 'Brilliant', 'Dragon', 'Frost',\n 'Heroic', 'Magma', 'Red', 'Stern', 'White', 'Axe', 'Bristle', 'Drake', 'Garnet', 'Hill',\n 'Marble', 'Righteous', 'Stone', 'Wind', 'Barbed', 'Broad', 'Doom', 'Gem', 'Holy', 'Merry',\n 'Ring', 'Storm', 'Wise', 'Barrel', 'Bronze', 'Dusk', 'Ghoul', 'Honor', 'Mighty', 'Rock',\n 'Stout', 'Wizard', 'Battle', 'Brown', 'Earth', 'Giant', 'Horn', 'Mithril', 'Ruby', 'Strong',\n 'Wolf', 'Beast', 'Candle', 'Elf', 'Glander', 'Ingot', 'Mountain', 'Rune', 'Sturdy', 'Worn',\n 'Bellows', 'Cart', 'Ember', 'Glitter', 'Iron', 'Mug', 'Sapphire', 'Talisman', 'Wraith',\n 'Berserk', 'Cask', 'Emerald', 'Glory', 'Jacinth', 'Night', 'Scale', 'Tankard', 'Wyrm',\n 'Beryl',\n 'Cave', 'Ever', 'Goblin', 'Jade', 'Noble', 'Shadow', 'Thunder', 'Bitter', 'Cavern', 'Evil',\n 'Golen', 'Jewel', 'Oaken', 'Shatter', 'Topaz', 'Black', 'Chain', 'Fey', 'Granite', 'Keen',\n 'Oath', 'Shield', 'Torch', 'Blazing', 'Chaos', 'Fiery', 'Great', 'Keg', 'Ogre', 'Short',\n 'Treasure', 'Blessed', 'Coal', 'Fire', 'Grey', 'Kobold', 'Omen', 'Silky', 'True', 'Blood',\n 'Coin', 'Flagon', 'Grim', 'Lamp', 'Onyx', 'Silvery', 'Troll', 'Blonde', 'Cold', 'Flask',\n 'Ground', 'Lantern', 'Opal', 'Smelt', 'Thunder', 'Blud', 'Copper', 'Flat', 'Grudge',\n 'Large',\n 'Order', 'Snow', 'Tunnel', 'Bone', 'Dark', 'Flint', 'Grumble', 'Lava', 'Ore', 'Sword',\n 'Twilight', 'Bottle', 'Deep', 'Foamy', 'Hall', 'Leather', 'Ork', 'Orc', 'Sour', 'Undead'],\n ['ale', 'blood', 'cane', 'delver', 'hammer', 'keg', 'mountain', 'shoulder', 'thane', 'arm',\n 'bloom', 'carver', 'explorer', 'hand', 'killer', 'mover', 'singer', 'thumb', 'armor',\n 'bolt',\n 'cask', 'eye', 'hauler', 'kindler', 'mug', 'slicer', 'tilter', 'axe', 'boot', 'chanter',\n 'finder', 'head', 'lamp', 'pack', 'slayer', 'tinker', 'back', 'bottle', 'chest', 'finger',\n 'heart', 'light', 'pick', 'smelter', 'toe', 'bane', 'born', 'chin', 'fire', 'helm', 'mace',\n 'plaits', 'smith', 'twister', 'barrel', 'bottom', 'chipper', 'fist', 'helmet', 'mail',\n 'pouch',\n 'speaker', 'warden', 'basher', 'bow', 'chisel', 'flask', 'hide', 'maker', 'pot', 'spinner',\n 'wearer', 'beard', 'braids', 'chopper', 'foam', 'hill', 'mallet', 'purse', 'spike',\n 'weaver',\n 'bearer', 'brand', 'chugger', 'foot', 'hewer', 'mane', 'rum', 'spirit', 'whiskey', 'beater',\n 'brandy', 'cleaver', 'forged', 'hoarder', 'mantle', 'rye', 'splitter', 'worker', 'beer',\n 'breaker', 'cliff', 'friend', 'hood', 'masher', 'sack', 'staff', 'wright', 'belly', 'brew',\n 'cloak', 'gauntlet', 'horn', 'mason', 'satchel', 'stone', 'belt', 'bringer', 'coat', 'grog',\n 'hot', 'master', 'seeker', 'sunder', 'bender', 'brow', 'crafter', 'guardian', 'hunter',\n 'mattock', 'seer', 'sword', 'binder', 'buckle', 'cutter', 'gut', 'jaw', 'maul', 'shaker',\n 'tankard', 'biter', 'buster', 'dagger', 'guzzler', 'jewel', 'miner', 'shanks', 'tapper',\n 'blade', 'caller', 'defender', 'hall', 'keeper', 'monger', 'shield', 'tearer'],\n ],\n },\n 'elf': {\n 'given_name': {\n 'male': [\n ['Va', 'The', 'A', 'Ae', 'Ara', 'Bei', 'Be', 'Ca', 'E', 'Ga', 'Gali', 'Ha', 'He', 'I',\n 'Ive', 'La',\n 'Lau', 'Mi', 'Pa', 'Pae', 'Pe', 'Qa', 'Qau', 'Ri', 'Ria', 'Ro', 'So', 'Tha'],\n ['r', 'l', 'nn', 'r', 'rr', 'd', 'rev', 'lim', 'dar', 'mm', 'll', 'c', 'ndart', 'd',\n 'zel', 'mi', 'riv',\n 'rd', 'nial', 'ldr'],\n ['is', 'en', 'an', 'ar', 'il', 'o', 'ic', 'ai', 'al', 'ios', 'ian', 
'ias', 'en', 'ion',\n 'on', 'iss',\n 'ior', 'ol', 'eral']\n ],\n 'female': []\n },\n 'surname': [\n ['Ama', 'Gala', 'Holi', 'Ilphel', 'Lia', 'Melia', 'Nai', 'Sian', 'Xilo', 'Alean', 'Alea',\n 'Arabi',\n 'Arkenea', 'Auvrea', 'Baequi', 'Banni', 'CyGreen', 'Dirth', 'Dryear', 'Dwi', 'Eyllis', 'Eyther',\n 'Freani',\n 'Gysse', 'Hlae', 'Hunith', 'Kennyr', 'Kille', 'Maern', 'Melith', 'Myrth', 'Norre', 'Orle',\n 'Oussea',\n 'Rilynn', 'Tease', 'Tyr', 'Tyrnea'],\n ['kiir', 'stacia', 'nodel', 'mion', 'kiir', 'don', 'mne', 'lo', 'scient', 'altin', 'anea',\n 'annia', 'aear',\n 'arnith', 'atear', 'athem', 'dlues', 'elrvis', 'eplith', 'ettln', 'ghymn', 'itryn', 'lylth',\n 'mitore',\n 'neldth', 'rae', 'raheal', 'rretyn', 'sithek', 'thym', 'tlarn', 'tlithar', 'tylar', 'undlin',\n 'urdrenn',\n 'valsa', 'virrea', 'zea']\n ]\n },\n 'half-orc': {\n 'given_name': [\n ['Ghazat', 'Abghat', 'Adgulg', 'Aghed', 'Agugh', 'Aguk', 'Almthu', 'Alog', 'Ambilge',\n 'Apaugh', 'Argha', 'Argigoth', 'Argug', 'Arpigig', 'Auhgan', 'Azhug', 'Bagdud', 'Baghig',\n 'Bahgigoth', 'Bandagh', 'Barfu', 'Bargulg', 'Baugh', 'Bidgug', 'Bildud', 'Bilge', 'Bog',\n 'Boghat', 'Bogugh', 'Borgan', 'Borug', 'Braugh', 'Brougha', 'Brugagh', 'Bruigig', 'Buadagh',\n 'Buggug', 'Builge', 'Buimghig', 'Bulgan', 'Bumhug', 'Buomaugh', 'Buordud', 'Burghed', 'Buugug',\n 'Cabugbu', 'Cagan', 'Carguk', 'Carthurg', 'Clog', 'Corgak', 'Crothu', 'Cubub', 'Cukgilug',\n 'Curbag', 'Dabub', 'Dugarod', 'Dugorim', 'Duiltag', 'Durbag', 'Eagungad', 'Eggha', 'Eggugat',\n 'Egharod', 'Eghuglat', 'Eichelberbog', 'Ekganit', 'Epkagut', 'Ergoth', 'Ertguth', 'Ewkbanok',\n 'Fagdud', 'Faghig', 'Fandagh', 'Farfu', 'Farghed', 'Fargigoth', 'Farod', 'Faugh', 'Feldgulg',\n 'Fidgug', 'Filge', 'Fodagog', 'Fogugh', 'Fozhug', 'Frikug', 'Frug', 'Frukag', 'Fubdagog',\n 'Fudhagh', 'Fupgugh', 'Gnadug', 'Gnalurg', 'Gnarg', 'Gnarlug', 'Gnorl', 'Gnorth', 'Gnoth',\n 'Gnurl', 'Golag', 'Golub', 'Gomatug', 'Gomoku', 'Gorgu', 'Gorlag', 'Grikug', 'Grug', 'Grukag',\n 'Grukk', 'Grung', 'Gruul', 'Guag', 'Gubdagog', 'Gudhagh', 'Gug', 'Gujarek', 'Gujek', 'Gujjab',\n 'Gulm', 'Gulrn', 'Gunaakt', 'Gunag', 'Gunug', 'Gurukk', 'Guthakug', 'Guthug', 'Gutjja', 'Hagob',\n 'Hagu', 'Hagub', 'Haguk', 'Hebub', 'Hegug', 'Hibub', 'Hig', 'Hogug', 'Hoknath', 'Hoknuk',\n 'Hokulk', 'Holkurg', 'Horknuth', 'Hrolkug', 'Hugagug', 'Hugmug', 'Hugolm', 'Ig', 'Igmut',\n 'Ignatz', 'Ignorg', 'Igubat', 'Igug', 'Igurg', 'Ikgnath', 'Ikkath', 'Inkathu', 'Inkathurg',\n 'Isagubat', 'Jogug', 'Jokgagu', 'Jolagh', 'Jorgagu', 'Jregh', 'Jreghug', 'Jugag', 'Jughog',\n 'Jughragh', 'Jukha', 'Jukkhag', 'Julakgh', 'Kabugbu', 'Kagan', 'Kaghed', 'Kahigig', 'Karfu',\n 'Karguk', 'Karrghed', 'Karrhig', 'Karthurg', 'Krugorim', 'Kubub', 'Kugbu', 'Kukgilug', 'Kulgha',\n 'Kupgugh', 'Kurbag', 'Kurmbag', 'Laghed', 'Lamgugh', 'Mabub', 'Magdud', 'Malthu', 'Marfu',\n 'Margulg', 'Mazhug', 'Meakgu', 'Mergigoth', 'Milug', 'Mudagog', 'Mugarod', 'Mughragh', 'Mugorim',\n 'Murbag', 'Naghat', 'Naghig', 'Naguk', 'Nahgigoth', 'Nakgu', 'Narfu', 'Nargulg', 'Narhbub',\n 'Narod', 'Neghed', 'Nehrakgu', 'Nildud', 'Nodagog', 'Nofhug', 'Nogugh', 'Nomgulg', 'Noogugh',\n 'Nugbu', 'Nughilug', 'Nulgha', 'Numhug', 'Nurbag', 'Nurghed', 'Oagungad', 'Oakgu', 'Obghat',\n 'Oggha', 'Oggugat', 'Ogharod', 'Oghuglat', 'Oguk', 'Ohomdud', 'Ohulhug', 'Oilug', 'Okganit',\n 'Olaghig', 'Olaugh', 'Olmthu', 'Olodagh', 'Olog', 'Omaghed', 'Ombilge', 'Omegugh', 'Omogulg',\n 'Omugug', 'Onog', 'Onubub', 'Onugug', 'Oodagh', 'Oogorim', 'Oogugbu', 'Oomigig', 'Opathu',\n 'Opaugh', 'Opeghat', 'Opilge', 'Opkagut', 
'Opoguk', 'Oquagan', 'Orgha', 'Orgoth', 'Orgug',\n 'Orpigig', 'Ortguth', 'Otugbu', 'Ougha', 'Podagog', 'Pofhug', 'Pomgulg', 'Poogugh', 'Porgarag',\n 'Pregu', 'Pretkag', 'Prigka', 'Prikdarok', 'Prutha', 'Pughilug', 'Puiltag', 'Purbag', 'Qog',\n 'Quadagh', 'Quilge', 'Quimghig', 'Quomaugh', 'Quordud', 'Quugug', 'Raghat', 'Raguk', 'Rakgu',\n 'Rarfu', 'Rebub', 'Rilug', 'Rodagog', 'Rogan', 'Romarod', 'Routhu', 'Rugbu', 'Rugorim', 'Rurbag',\n 'Rurigig', 'Sabub', 'Saghig', 'Sahgigoth', 'Sahgorim', 'Sakgu', 'Salthu', 'Saraugug', 'Sarfu',\n 'Sargulg', 'Sarhbub', 'Sarod', 'Sbghat', 'Seakgu', 'Sguk', 'Shomdud', 'Shulhug', 'Sildud',\n 'Silge', 'Silug', 'Sinsbog', 'Slaghig', 'Slapdud', 'Slaugh', 'Slodagh', 'Slog', 'Slughig',\n 'Smaghed', 'Smegugh', 'Smogulg', 'Snog', 'Snubub', 'Snugug', 'Sodagh', 'Sog', 'Sogorim',\n 'Sogugbu', 'Sogugh', 'Sombilge', 'Somigig', 'Sonagh', 'Sorgulg', 'Sornaraugh', 'Soughat',\n 'Spathu', 'Speghat', 'Spilge', 'Spoguk', 'Squagan', 'Surgug', 'Surpigig', 'Tagdud', 'Taghig',\n 'Tandagh', 'Tarfu', 'Targhed', 'Targigoth', 'Tarod', 'Taugh', 'Teldgulg', 'Tidgug', 'Tilge',\n 'Todagog', 'Tog', 'Toghat', 'Togugh', 'Torgan', 'Torug', 'Tozhug', 'Traugh', 'Trilug', 'Trougha',\n 'Trugagh', 'Truigig', 'Tuggug', 'Tulgan', 'Turbag', 'Turge', 'Ug', 'Ugghra', 'Uggug', 'Ughat',\n 'Ulgan', 'Ulmragha', 'Ulmrougha', 'Umhra', 'Umragig', 'Umruigig', 'Ungagh', 'Unrugagh', 'Urag',\n 'Uraugh', 'Urg', 'Urgan', 'Urghat', 'Urgran', 'Urlgan', 'Urmug', 'Urug', 'Urulg', 'Vabugbu',\n 'Vagan', 'Vagrungad', 'Vagungad', 'Vakgar', 'Vakgu', 'Varkgorim', 'Varthurg', 'Vegum', 'Vergu',\n 'Verlgu', 'Verthag', 'Verthurg', 'Vetorkag', 'Vidarok', 'Vigdolg', 'Vigdug', 'Viggu', 'Viggulm',\n 'Viguka', 'Vitgurat', 'Vitgut', 'Vlog', 'Vlorg', 'Vorgak', 'Vorgarag', 'Vothug', 'Vregu',\n 'Vretkag', 'Vrigka', 'Vrikdarok', 'Vrogak', 'Vrograg', 'Vrothu', 'Vruhag', 'Vrutha', 'Vubub',\n 'Vugub', 'Vuiltag', 'Vukgilug', 'Vultog', 'Woglug', 'Wokganit', 'Womkug', 'Womrikug',\n 'Wonabadug', 'Worthag', 'Wraog', 'Wrug', 'Wrukag', 'Wrukaog', 'Wubdagog', 'Wudgh', 'Wudhagh',\n 'Wudugog', 'Wuglat', 'Wumanok', 'Wumkbanok', 'Wurgoth', 'Wurmha', 'Wurtguth', 'Wurthu',\n 'Wutgarek', 'Xaakt', 'Xago', 'Xagok', 'Xagu', 'Xaguk', 'Xothkug', 'Xruul', 'Xuag', 'Xug',\n 'Xugaa', 'Xugag', 'Xugagug', 'Xugar', 'Xugarf', 'Xugha', 'Xugor', 'Xugug', 'Xujarek', 'Xuk',\n 'Xulgag', 'Xunaakt', 'Xunag', 'Xunug', 'Xurek', 'Xurl', 'Xurug', 'Xurukk', 'Xutag', 'Xuthakug',\n 'Xutjja', 'Yaghed', 'Yagnar', 'Yagnatz', 'Yahg', 'Yahigig', 'Yakgnath', 'Yakha', 'Yalakgh',\n 'Yargug', 'Yegigoth', 'Yegoth', 'Yerghug', 'Yerug', 'Ymafubag', 'Yokgagu', 'Yokgu', 'Yolmar',\n 'Yonkathu', 'Yregh', 'Yroh', 'Ysagubar', 'Yughragh', 'Zildud', 'Zilge', 'Zilug', 'Zinsbog',\n 'Zlapdud', 'Zlog', 'Zlughig', 'Zodagh', 'Zog', 'Zogugbu', 'Zogugh', 'Zombilge', 'Zonagh',\n 'Zorfu', 'Zorgulg', 'Zorhgigoth', 'Zornaraugh', 'Zoughat', 'Azuk', 'Bagamul', 'Bakh', 'Baronk',\n 'Bashag', 'Bazgulub', 'Bogakh', 'Borug', 'Both', 'Bugdul', 'Bugharz', 'Bugrash', 'Bugrol',\n 'Bumbub', 'Burul', 'Dul', 'Dular', 'Duluk', 'Duma', 'Dumbuk', 'Dumburz', 'Dur', 'Durbul',\n 'Durgash', 'Durz', 'Durzol', 'Durzub', 'Durzum', 'Garothmuk', 'Garzonk', 'Gashna', 'Ghamborz',\n 'Ghamonk', 'Ghoragdush', 'Ghorlorz', 'Glush', 'Grat', 'Guarg', 'Gurak', 'Khadba', 'Khagra',\n 'Khargol', 'Koffutto', 'Largakh', 'Lorbumol', 'Lorzub', 'Lugdum', 'Olumba', 'Orakh', 'Rogdul',\n 'Shakh', 'Shamar', 'Shamob', 'Shargam', 'Sharkub', 'Shat', 'Shulong', 'Shura', 'Shurkul',\n 'Shuzug', 'Snaglak', 'Snakha', 'Snat', 'Ugdumph', 'Ughash', 'Bogrum', 'Brag', 
'Brokil', 'Bugak',\n 'Buramog', 'Burz', 'Dubok', 'Dul', 'Dulfish', 'Dumag', 'Dulphumph', 'Gaturn', 'Gogron', 'Gorgo',\n 'Graklak', 'Graman', 'Grommok', 'Gul', 'Hanz', 'Arob', 'Balogog', 'Borkul', 'Burguk',\n 'Dushnamub', 'Gat', 'Ghamorz', 'Ghorbash', 'Gradba', 'Yamarz', 'Yar']\n ],\n 'surname': [\n ['Agrob', 'Badbog', 'Bashuk', 'Bogdub', 'Bugdurash', 'Bula', 'Bulak', 'Bulfim', 'Bum', 'Burzob',\n 'Burub', 'Dura', 'Durgat', 'Durz', 'Gashnakh', 'Ghob', 'Glasha', 'Glob', 'Gluronk', 'Gonk', 'Grat',\n 'Grazob', 'Gulfim', 'Kharzug', 'Lagakh', 'Lambug', 'Lazgar', 'Mogak', 'Morn', 'Murob', 'Murzush',\n 'Nargol', 'Rolfish', 'Orbul', 'Ragash', 'Rulfim', 'Shadbak', 'Shagar', 'Shagdub', 'Sharn', 'Sharog',\n 'Shazgob', 'Shelur', 'Uloth', 'Ulumpha', 'Urzoth', 'Urzul', 'Ushat', 'Ushug', 'Yazgash', 'Batul',\n 'Borba', 'Bumph', 'Homraz', 'Rogbut', 'Mazoga', 'Mog', 'Mor', 'Oghash', 'Rogmesh', 'Snak', 'Ugak',\n 'Umog', 'Arob', 'Atub', 'Bagrak', 'Bolar', 'Bor', 'Borgakh', 'Dulug', 'Garakh', 'Ghak', 'Gharol',\n 'Ghorza', 'Gul', 'Lash', 'Murbol', 'Sharamph', 'Shel', 'Shufharz', 'Ugor', 'Urog', 'Yotul']\n ]\n }\n}\n","sub_path":"generators/generators/name_config.py","file_name":"name_config.py","file_ext":"py","file_size_in_byte":14395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"329230028","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 22 16:06:27 2020\n\n@author: jwhel\n\"\"\"\n\n# Documentation\n\n# The University of Texas at Austin - Spring 2020\n# ME 337G - Nuclear Safety and Security - Dr. HAAS, Derek \n# Team 7 - Bomb Squad - INANC, Ece Shelby WHELAN, Jack \n\n# This code uses the Bateman equation to solve for the concentrations of\n# the daughter products of Xe-140, a fission fragment of U-235.\n\n## Constants \nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.animation import FuncAnimation\n\n#Place holders for velocity terms\nLeakVelocity = 1 #[m/s]\nSeepVelocity = 10 #[m/s]\nVentVelocity = 100 #[m/s]\n\n#Reactivity Constants including placeholders [1/m]\nXeReactivity = 0\nCsReactivity = np.log(50)\nBaReactivity = np.log(40)\nLaReactivity = np.log(30)\nCeReactivity = np.log(20)\n\n#placeholder time value\nt = 1\n#Reactivity Decay Terms : exp([1/m]*[m/s]*[s])\nRD_Xe_Leak = np.exp(XeReactivity*LeakVelocity*t)\nRD_Xe_Seep = np.exp(XeReactivity*SeepVelocity*t)\nRD_Xe_Vent = np.exp(XeReactivity*VentVelocity*t)\n\nRD_Cs_Leak = np.exp(CsReactivity*LeakVelocity*t)\nRD_Cs_Seep = np.exp(CsReactivity*SeepVelocity*t)\nRD_Cs_Vent = np.exp(CsReactivity*VentVelocity*t)\n\nRD_Ba_Leak = np.exp(BaReactivity*LeakVelocity*t)\nRD_Ba_Seep = np.exp(BaReactivity*SeepVelocity*t)\nRD_Ba_Vent = np.exp(BaReactivity*VentVelocity*t)\n\nRD_La_Leak = np.exp(LaReactivity*LeakVelocity*t)\nRD_La_Seep = np.exp(LaReactivity*SeepVelocity*t)\nRD_La_Vent = np.exp(LaReactivity*VentVelocity*t)\n\nRD_Ce_Leak = np.exp(CeReactivity*LeakVelocity*t)\nRD_Ce_Seep = np.exp(CeReactivity*SeepVelocity*t)\nRD_Ce_Vent = np.exp(CeReactivity*VentVelocity*t)\n\n#timespan adjusts the time for which the model will examine, also affects frames in gif\ntimespan = 1000 # [s]\n\n\nHL_XE = 14 # Half-life of Xe-140, [s]\nHL_CS = 64 # Half-life of Cs-140, [s]\nHL_BA = 13*24*60*60 # Half-life of Ba-140, [s]\nHL_LA = 40*60*60 # Half-life of La-140, [s]\n\nLL_XE = np.log(2)/HL_XE # Decay constant of Xe-140, [1/s]\nLL_CS = np.log(2)/HL_CS # Decay constant of Cs-140, [1/s]\nLL_BA = np.log(2)/HL_BA # Decay constant of Ba-140, [1/s]\nLL_LA = np.log(2)/HL_LA # Decay constant of La-140, 
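
NAMES_CONFIG above stores given names as parallel syllable lists: one random pick from each sub-list, concatenated in order. A small generator makes the intended use explicit; the two-syllable stub config below is mine, not the table above, and the seed is only for reproducibility:

import random

random.seed(7)

STUB_CONFIG = {
    'given_name': {'male': [['B', 'D'], ['a', 'o'], ['bur', 'rin']]},
    'surname': [['Iron', 'Stone'], ['beard', 'fist']],
}

def make_name(config, gender='male'):
    # One choice per syllable slot, joined in order.
    given = ''.join(random.choice(part) for part in config['given_name'][gender])
    surname = ''.join(random.choice(part) for part in config['surname'])
    return f'{given} {surname}'

print(make_name(STUB_CONFIG))   # e.g. 'Dorin Ironfist'
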
[1/s]\nLL_CE = 0\n\nLL = [LL_XE, LL_CS, LL_BA, LL_LA, LL_CE]\n\n## Compute Coefficients\n\ncoefficients = []\nfor NUM_COEFF in range(0,5):\n row = []\n for j in range(0,NUM_COEFF+1):\n NUMERATOR = 1\n DENOMINATOR = 1\n for k in range(0,NUM_COEFF+1):\n NUMERATOR *= LL[k]\n if k != j:\n DENOMINATOR *= LL[k]-LL[j]\n row.append(NUMERATOR/DENOMINATOR)\n while len(row) < 5:\n row.append(0)\n coefficients.append(row)\n# print(row)\nCOEFF = np.array(coefficients)\n\n\n## Compute Radionuclide Density\n## NN[0] is the starting amounts of each radionuclide\nInitial_amount_fission_product = 1\nNN = [[Initial_amount_fission_product,0,0,0,0]]\nT = 0\nfor i in range(0,int(timespan/2)):\n row = []\n T += 2 # Increment time, [s]\n for j in range(0,5):\n SUMM = 0\n for k in range(0,j+1):\n SUMM += COEFF[j][k]*np.exp((-1)*LL[k]*T) #this is where we would multiply by the relevant reactivity decay term\n if j < 4:\n row.append(NN[0][0]*SUMM/LL[j])\n row.append((NN[i-1][4]+LL[3]*(row[3]-NN[i-1][3]))) \n \n NN.append(row)\n \nXe = []\nCs = []\nBa = []\nLa = []\nCe = []\nfor x in NN:\n Xe.append(x[0])\n Cs.append(x[1])\n Ba.append(x[2])\n La.append(x[3])\n Ce.append(x[4])\n \nNN = np.array(NN)\n\ntime = [*range(0,timespan+1,2)]\nplt.figure()\n\nplt.plot(time,Xe,label = 'Xe-140')\n\nplt.plot(time,Cs,label = 'Cs-140')\n\nplt.plot(time,Ba,label = 'Ba-140')\n\nplt.plot(time,La,label = 'La-140')\n\nplt.plot(time,Ce,label = 'Ce-140')\n\nplt.yscale('log')\nplt.xlabel('Time, [s]')\nplt.ylabel('Nuclear Density, [# of nuclei/cm^3]')\nplt.title('Nuclear Density of Xe-140 Daughter Products')\nplt.legend()\nplt.grid()\nplt.savefig('Model_Graph.png')\n\n# create the scatter plot.\nfig, ax = plt.subplots()\nfig.set_tight_layout(True)\n \n# Query the figure's on-screen size and DPI. \n# Note that when saving the figure to a file, \n# we need to provide a DPI for that separately.\nprint('fig size: {0} DPI, size in inches {1}'.format(fig.get_dpi(), fig.get_size_inches()))\n \n# Plot the set of scatter points\n\n#plt.axis([0, 200, 0, 1])\nplt.yscale('log')\nplt.ylabel('Nuclear Density, [# of nuclei/cm^3]')\nplt.title('Nuclear Density of Xe-140 Daughter Products')\nplt.grid()\nlegend = plt.legend()\nplt.xlim(0,timespan)\n\nlineXe, = ax.plot(time[0:2],Xe[0:2],label = 'Xe-140', c = 'g')\nlineCs, = ax.plot(time[0:2],Cs[0:2],label = 'Cs-140', c = 'c')\nlineBa, = ax.plot(time[0:2],Ba[0:2],label = 'Ba-140', c = 'b')\nlineLa, = ax.plot(time[0:2],La[0:2],label = 'La-140', c = 'r')\nlineCe, = ax.plot(time[0:2],Ce[0:2],label = 'Ce-140', c = 'm')\n# This function updates the plot when it is called to generate\n# each frame requested by the FuncAnimation method\ndef update(i):\n label = 'Timestep {0} [s]'.format(i*2-2)\n print(label)\n # Update the axes (with a new xlabel). 
Return a tuple of\n # \"artists\" that have to be redrawn for this frame.\n ax.set_xlabel(label)\n\n plt.legend()\n lineXe.set_xdata(time[0:i+1])\n lineXe.set_ydata(Xe[0:i+1])\n lineCs.set_xdata(time[0:i+1])\n lineCs.set_ydata(Cs[0:i+1])\n lineBa.set_xdata(time[0:i+1])\n lineBa.set_ydata(Ba[0:i+1])\n lineLa.set_xdata(time[0:i+1])\n lineLa.set_ydata(La[0:i+1])\n lineCe.set_xdata(time[0:i+1])\n lineCe.set_ydata(Ce[0:i+1])\n\n return lineXe,lineCs,lineBa,lineLa,lineCe,ax\n\n# Create an animation object from the created figure that includes\n\nanim = FuncAnimation(fig, update, frames=len(Xe)+1, interval=100, repeat_delay = 10000)\n\n# save a gif of the animation using the writing package from magick\nanim.save('Model_Gif.gif', dpi=72, writer='imagemagick')\n","sub_path":"Project_Model.py","file_name":"Project_Model.py","file_ext":"py","file_size_in_byte":5762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"363975824","text":"__author__ = 'taejoon'\nimport sys\nimport trello_query\nimport utils\n\nclass Cli:\n\n def __init__(self):\n if len(sys.argv) > 1:\n self._parse_arg(sys.argv[1:]).run()\n\n def _parse_arg(argv: list) -> trello_query.Query:\n if len(argv) == 0:\n raise ValueError('Expected a command')\n return trello_query.Query.factory(argv[0], argv[1:])\n _parse_arg = staticmethod(_parse_arg)\n\n def go(self):\n while True:\n user_input = input(\"> \")\n user_input = user_input.split()\n try:\n query = self._parse_arg(user_input)\n query.run()\n except ValueError as e:\n utils.print_error(str(e))\n\n\ndef main():\n cli = Cli()\n cli.go()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"itrello.py","file_name":"itrello.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71963460","text":"import os\n\npath = os.getcwd() # pwd\nprint(path)\nos.mkdir(\"newFOlder\") # mkdir newFolder\ndesktopPath = \"/home/kirigaikabuto/Desktop\"\nos.chdir(desktopPath) # cd /home/kirigaikabuto/Desktop\nos.mkdir(\"newFOlder\") # mkdir newFolder\nnewPath = os.getcwd() # pwd\nprint(newPath)\n\n# create a folder on the desktop (desktopFolder) and create three more folders (1,2,3) inside it\n# after creating the folder on the desktop, you need to change the path you are currently in","sub_path":"lesson7/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"613611878","text":"import os, argparse, logging\nimport numpy as np\nfrom os.path import join\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom tqdm import trange, tqdm\nfrom ipdb import set_trace as st\nfrom models.se3net import SE3Net\nfrom util.utils import weight_init, set_gpu_mode, zeros, get_numpy, fc_data_to_loaders, trajectories_to_data\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.metrics import mean_squared_error\nfrom scipy import optimize\nfrom scipy.optimize import least_squares\nfrom sklearn.gaussian_process import GaussianProcessRegressor\nimport pickle\n\n_LOSS = nn.MSELoss\n\ndef compute_acc(labels_pred, y):\n N = len(labels_pred)\n corrects = labels_pred * y\n acc = torch.sum(corrects) / 2 / N\n return get_numpy(acc)\n\ndef labels_from_preds(preds):\n prob = F.softmax(preds, dim=1)\n _, indices = torch.topk(prob, 2)\n labels_pred = zeros(preds.shape, 
requires_grad=False)\n\n N = len(preds)\n labels_pred[np.arange(N), indices[:, 0]] = 1\n labels_pred[np.arange(N), indices[:, 1]] = 1\n\n return labels_pred\n\ndef forward_results(x, y, model):\n preds = model(x)\n labels_pred = labels_from_preds(preds)\n\n criterion = _LOSS()\n loss = get_numpy(criterion(preds, y))\n acc = compute_acc(labels_pred, y)\n\n return loss, acc, preds, labels_pred\n\ndef get_input_optimizer(action):\n optimizer = optim.Adam([action.requires_grad_()], lr=0.01)\n return optimizer\n\ndef train(model, loader_tr, loader_t, lr=1e-4, epochs=100):\n logs = {\n 'loss': {\n 'tr': [],\n 't': []\n },\n 'acc': {\n 'tr': [],\n 't': []\n }\n }\n criterion = _LOSS()\n opt = optim.Adam(model.parameters(), lr=lr)\n t_epochs = trange(epochs, desc='{}/{}'.format(0, epochs))\n num_batches_tr = len(loader_tr)\n num_batches_t = len(loader_t)\n for e in t_epochs:\n # Train\n loss_tr = 0\n acc_tr = 0\n t_batches = tqdm(loader_tr, leave=False, desc='Train')\n for xb, yb in t_batches:\n opt.zero_grad()\n pred = model(xb)\n\n loss = criterion(pred, yb)\n labels_pred = labels_from_preds(pred)\n acc = compute_acc(labels_pred, yb)\n loss_tr += loss\n acc_tr += acc\n\n loss.backward()\n opt.step()\n\n t_batches.set_description('Train: {:.2f}, {:.2f}'.format(loss, acc))\n t_batches.update()\n\n if e == 10:\n action_plan = torch.rand(3).cuda() * 0.01\n cur_state = xb[-1][:-3]\n action_opt = get_input_optimizer(action_plan)\n # scheduler = ReduceLROnPlateau(optimizer, 'min')\n def closure():\n action_opt.zero_grad()\n pred1 = model(torch.cat((cur_state, action_plan), dim =0))\n loss1 = criterion(pred1, yb[-1])\n loss1.backward()\n return loss1\n print(xb[-1][-3:])\n for j in range(1000):\n action_opt.zero_grad()\n pred1 = model(torch.cat((cur_state, action_plan), dim =0))\n loss1 = criterion(pred1, yb[-1])\n loss1.backward() \n print(action_plan.data.cpu())\n action_opt.step()\n # scheduler.step(loss1)\n pred1 = model(torch.cat((cur_state, action_plan), dim =0))\n loss1 = criterion(pred1, yb[-1])\n\n st()\n\n loss_tr /= num_batches_tr\n acc_tr /= num_batches_tr\n\n # Eval on test\n loss_t = 0\n acc_t = 0\n for xb, yb in tqdm(loader_t, leave=False, desc='Eval'):\n loss, acc, _, _ = forward_results(xb, yb, model)\n loss_t += loss\n acc_t += acc\n loss_t /= num_batches_t\n acc_t /= num_batches_t\n \n t_epochs.set_description('{}/{} | Tr {:.2f}, {:.2f}. 
T {:.2f}, {:.2f}'.format(e, epochs, loss_tr, acc_tr, loss_t, acc_t))\n t_epochs.update()\n print('epoch: ', e)\n print('train_loss: ', loss_tr)\n print('test_loss: ', loss_t)\n logs['loss']['tr'].append(loss_tr)\n logs['acc']['tr'].append(acc_tr)\n logs['loss']['t'].append(loss_t)\n logs['acc']['t'].append(acc_t)\n print('-'*10)\n\n return logs\n\ndef train_linear_model(X_tr, X_t, Y_tr, Y_t):\n # reg = LinearRegression().fit(X_tr, Y_tr )\n # print(reg.score(X_tr, Y_tr))\n # print(reg.score(X_t, Y_t))\n # print('-')\n clf = KernelRidge(alpha=0.01)\n clf = GaussianProcessRegressor(alpha=0.01)\n clf.fit(X_tr, Y_tr)\n print(clf.score(X_tr, Y_tr))\n print(clf.score(X_t, Y_t)) \n\n # grad = get_gradient(clf, X_tr[0], Y_tr[0])\n for _ in range(5):\n idx = np.random.randint(0, len(X_tr))\n u = X_tr[idx][-6:]\n x = X_tr[idx][:-6]\n xnext = Y_tr[idx]\n u_opt = get_optimal_action(u, x, xnext, clf)\n print('converged')\n # st()\n pickle.dump(clf, open('/home/msieb/projects/bullet-demonstrations/model.pkl', 'wb'))\n return clf\n\n\n\ndef get_gradient(clf, x, y):\n def func(x, y):\n return mean_squared_error(clf.predict(x[None])[0], y)\n eps = 1e-6\n return optimize.approx_fprime(x, func, eps, y)\n\ndef fun(u, clf, x, xnext):\n xu = np.concatenate((x, u))\n return clf.predict(xu[None])[0] - xnext\n\ndef get_optimal_action(u, x, xnext, clf):\n res = least_squares(fun, u, method='lm', f_scale=0.1, args=(clf, x, xnext))['x']\n return res\n\nif __name__ == '__main__':\n set_gpu_mode(True)\n logging.getLogger().setLevel(logging.INFO)\n parser = argparse.ArgumentParser()\n root_dir = '/home/msieb/projects/bullet-demonstrations/experiments/cube_push/demos_3'\n # parser.add_argument('--x_data', '-x', type=str, default=join(data_dir, 'relative_end_effector_states.npy'))\n # parser.add_argument('--u_data', '-u', type=str, default=join(data_dir, 'action_states.npy'))\n # parser.add_argument('--cube_data', '-c', type=str, default=join(data_dir, 'cube_states.npy'))\n parser.add_argument('--out_dir', '-o', type=str, default='output/rn_rigid')\n parser.add_argument('--test_size', '-t', type=float, default=0.2)\n parser.add_argument('--epochs', '-e', type=int, default=100)\n parser.add_argument('--learning_rate', '-r', type=float, default=1e-4)\n parser.add_argument('--batch_size', '-b', type=int, default=2)\n args = parser.parse_args()\n\n logging.info('Loading {}'.format(root_dir))\n logging.info('Processing Data')\n x_data, xnext_data, u_data = trajectories_to_data(root_dir)\n\n loader_tr, loader_t, X_tr, X_t, Y_tr, Y_t = fc_data_to_loaders(x_data, u_data, xnext_data, args.test_size, args.batch_size)\n train_linear_model(X_tr, X_t, Y_tr, Y_t) \n logging.info('Training.')\n\n # TODO\n model = SE3Net(loader_tr.dataset.tensors[0].shape[-1], \n loader_tr.dataset.tensors[1].shape[-1]).cuda()\n model.apply(weight_init)\n\n logs = train(model, loader_tr, loader_t, lr=args.learning_rate, epochs=args.epochs)\n # TODO save stuff\n\n import IPython\n IPython.embed()\n exit()\n","sub_path":"scripts/train_se3net.py","file_name":"train_se3net.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"221776143","text":"from common import *\nimport numpy as np\nfrom config import config, getValue, getTime,isna\nimport io\nimport os \nimport sys\nfrom common import superuser_login\nfrom WaterConnection import *\nfrom WaterLegacyDemand import *\nimport pandas as pd\nimport openpyxl\nimport collections\nimport re\nimport traceback\n\nnow = 
datetime.now()\ndate_time = now.strftime(\"%d-%m-%Y\") \n# FOLDER_PATH =r'D:\\eGov\\Data\\WS\\Azure Insertion'\n# FOLDER_PATH =r'D:\\eGov\\Data\\WS\\UAT Insertion'\n# FOLDER_PATH =r'C:\\Users\\Administrator\\Downloads\\WaterSewerageTemplates'\nFOLDER_PATH =r'D:\\eGov\\Data\\WS\\Legacy Demand'\ncityToSkip = ['Bakloh','Bareilly','Dagshai','Dalhousie','Deolali','Ferozepur','Jabalpur','Jammu','Jutogh','Kanpur',\n 'Kasauli','Khasyol','Meerut','Nainital','Pachmarhi','Ramgarh','Secunderabad','Subathu', 'roorkee']\n\n# cityToInclude = ['Landour','Lansdowne','Lebong','Lucknow','Mathura','Mhow','Morar','Nasirabad','Pune','Ranikhet','Saugor',\n# 'Shahjahanpur','Shillong','Varanasi','Wellington']\n# cityToInclude = ['Jalapahar','Jhansi','Kamptee','Lebong','Lucknow','Mathura']\ncityToInclude = ['testing']\n\n\ndef main() : \n print(\"Replace 109 of C:\\ProgramData\\Miniconda3\\envs\\py36\\lib\\site-packages\\openpyxl\\worksheet\\merge.py with below one \") \n print (\"if side is None or side.style is None:\")\n # print('cityToSkip', len(cityToSkip))\n root = FOLDER_PATH \n errorlogfile = open(os.path.join(root, \"error CBs.txt\"), \"w\") \n successlogfile = open(os.path.join(root, \"CB With ProperData.txt\"), \"w\")\n notsuccesslogfile = open(os.path.join(root, \"CB With ImProperData.txt\"), \"w\")\n config.error_in_excel=[]\n config.error_in_multiple_owner=[]\n config.DATA_ENTRY_ISSUES_FOLDER =os.path.join(root,date_time + '-Data_Entries_Issues')\n config.DEMAMD_ENTRY_ISSUES_FOLDER =os.path.join(root,date_time + '-Demand_Entries_Issues')\n if not os.path.exists(config.DATA_ENTRY_ISSUES_FOLDER) :\n os.makedirs(config.DATA_ENTRY_ISSUES_FOLDER)\n if not os.path.exists(config.DEMAMD_ENTRY_ISSUES_FOLDER) :\n os.makedirs(config.DEMAMD_ENTRY_ISSUES_FOLDER)\n if not os.path.exists(os.path.join(config.DATA_ENTRY_ISSUES_FOLDER,\"DATE_ERROR\")) :\n os.makedirs(os.path.join(config.DATA_ENTRY_ISSUES_FOLDER,\"DATE_ERROR\"))\n with io.open(config.TENANT_JSON, encoding=\"utf-8\") as f:\n cb_module_data = json.load(f)\n ####Only for some CBs\n # cityToInclude = getCitiesToInclude(cityToSkip,cb_module_data)\n for found_index, cityname in enumerate(cityToInclude):\n cityname =cityname.lower()\n config.errormsg=[]\n name = 'CB ' + cityname\n if os.path.exists( os.path.join(root,name)): \n try : \n if True:# cityname == 'jutogh' :\n print(\"Processing for CB \"+cityname.upper())\n config.CITY_NAME = cityname\n cbMain(cityname, successlogfile, notsuccesslogfile)\n except Exception as ex: \n print(\"Error in processing CB \",cityname , ex)\n traceback.print_exc()\n errorlogfile.write(cityname+\"\\n\")\n if len(config.errormsg ) > 0 : \n dateerror = open(os.path.join(config.DATA_ENTRY_ISSUES_FOLDER,\"DATE_ERROR\",cityname+ \"dateError.txt\"), \"w\") \n for element in config.errormsg:\n dateerror.write(element + \"\\n\") \n dateerror.close()\n\n #### For all CBs\n # for found_index, module in enumerate(cb_module_data[\"tenants\"]):\n # if module[\"city\"][\"ulbGrade\"]==\"ST\":\n # continue\n # cityname =module[\"code\"].lower()[3:]\n # config.errormsg=[]\n # name = 'CB ' + cityname.lower()\n # if os.path.exists( os.path.join(root,name)): \n # try : \n # if True:# cityname == 'subathu' :\n # print(\"Processing for CB \"+cityname.upper())\n # config.CITY_NAME = cityname\n # cbMain(cityname, successlogfile, notsuccesslogfile)\n # except Exception as ex: \n # print(\"Error in processing CB \",cityname , ex)\n # traceback.print_exc()\n # errorlogfile.write(cityname+\"\\n\")\n # if len(config.errormsg ) > 0 : \n # dateerror = 
open(os.path.join(config.DATA_ENTRY_ISSUES_FOLDER,\"DATE_ERROR\",cityname+ \"dateError.txt\"), \"w\") \n # for element in config.errormsg:\n # dateerror.write(element + \"\\n\") \n # dateerror.close()\n \n errorlogfile.close()\n successlogfile.close() \n if len(config.error_in_excel) > 0 : \n cbHaveExcelIssue = open(os.path.join(config.DATA_ENTRY_ISSUES_FOLDER,\"_CB_HAVE_EXCEL_ISSUE.txt\"), \"w\") \n for element in config.error_in_excel:\n cbHaveExcelIssue.write(element + \"\\n\") \n cbHaveExcelIssue.close()\n \n if len(config.error_in_multiple_owner) > 0 : \n cbHaveMultipleOwnerIssue = open(os.path.join(config.DATA_ENTRY_ISSUES_FOLDER,\"_CB_HAVE_MULTIPLE_OWNER_ISSUE.txt\"), \"w\") \n for element in config.error_in_multiple_owner:\n cbHaveMultipleOwnerIssue.write(element + \"\\n\") \n cbHaveMultipleOwnerIssue.close()\n\ndef getCitiesToInclude(cityToSkip,cb_module_data): \n cityToSkipLower = []\n for found_index, cityname in enumerate(cityToSkip):\n cityToSkipLower.append(cityname.lower())\n cityToSkipLower.sort()\n allCities = []\n cityToInclude = []\n for found_index, module in enumerate(cb_module_data[\"tenants\"]):\n if module[\"city\"][\"ulbGrade\"]==\"ST\":\n continue\n cityname =module[\"code\"].lower()[3:]\n allCities.append(cityname) \n try:\n allCities.sort()\n cityToInclude = np.setdiff1d(allCities, cityToSkipLower)\n # cityToInclude = set(allCities.sort()) - set(cityToSkipLower.sort()) \n except:\n traceback.print_exc() \n return cityToInclude\n\ndef cbMain(cityname, successlogfile,notsuccesslogfile):\n Flag =False\n tenantMapping={}\n count = 0\n with io.open(config.TENANT_JSON, encoding=\"utf-8\") as f:\n cb_module_data = json.load(f)\n for found_index, module in enumerate(cb_module_data[\"tenants\"]):\n if module[\"city\"][\"ulbGrade\"]==\"ST\":\n continue\n tenantMapping[module[\"code\"].lower()]=module[\"code\"].lower()[3:]\n\n # Doing for one cb at a time\n root = FOLDER_PATH\n name = 'CB ' + cityname.lower()\n demandFile =os.path.join(root, name,'Legacy Demand-' + cityname + '.xlsx')\n logfile = open(os.path.join(root, name, \"Logfile.json\"), \"w\") \n connlogfile = open(os.path.join(root, name, \"ConnLogfile.json\"), \"w\") \n countfile = open(os.path.join(root, name, \"count.txt\"), \"w\") \n logfile.write(\"[ \")\n connlogfile.write(\"[ \")\n if os.path.exists(demandFile) : \n validate = validateDataForDemand(demandFile, logfile, cityname)\n if(validate == False): \n print('Data validation for Demand failed, please check the log file.') \n if config.INSERT_DATA: \n return\n else:\n print('Data validation for Demand succeeded.') \n wb_demand = openpyxl.load_workbook(demandFile) \n sheet1 = wb_demand.get_sheet_by_name('Demand') \n \n if config.INSERT_DATA:\n createDemands(sheet1, cityname, logfile, root, name, countfile, connlogfile) \n wb_demand.save(demandFile) \n wb_demand.close()\n else:\n print(\"Demand file does not exist for \", cityname) \n\n logfile.seek(logfile.tell() - 1, os.SEEK_SET)\n logfile.write('')\n logfile.write(\"]\") \n\n connlogfile.seek(connlogfile.tell() - 1, os.SEEK_SET)\n connlogfile.write('')\n connlogfile.write(\"]\") \n\n size = os.path.getsize(os.path.join(root, name, \"Logfile.json\")) \n logfile.close()\n size1 = os.path.getsize(os.path.join(root, name, \"ConnLogfile.json\")) \n connlogfile.close() \n try : \n if size > 2 : \n df = pd.read_json (os.path.join(root, name, \"Logfile.json\"))\n notsuccesslogfile.write(cityname)\n notsuccesslogfile.write(\"\\n\") \n df.to_excel(os.path.join(config.DATA_ENTRY_ISSUES_FOLDER , name + \" Data 
Entries Issues.xlsx\"), index = None)\n elif size1 > 2 : \n df = pd.read_json (os.path.join(root, name, \"ConnLogfile.json\"))\n notsuccesslogfile.write(cityname)\n notsuccesslogfile.write(\"\\n\") \n df.to_excel(os.path.join(config.DEMAND_ENTRY_ISSUES_FOLDER , name + \" Demand Entries Issues.xlsx\"), index = None)\n else : \n successlogfile.write(cityname)\n successlogfile.write(\"\\n\") \n except Exception as ex: \n print(\"Error in parsing json file\",ex)\n\ndef validateDataForDemand(propertyFile, logfile, cityname):\n validated = True\n reason = ''\n try:\n wb_property = openpyxl.load_workbook(propertyFile) \n sheet1 = wb_property.get_sheet_by_name('Demand') \n validated = ValidateCols(logfile, propertyFile, sheet1)\n if not validated :\n print(\"Column Mismatch, sheets needs to be corrected\")\n config[\"error_in_excel\"].append(cityname +\" have column issue in property sheet\")\n\n # print('no. of rows in Property file sheet 1: ', sheet2.max_row ) \n emptyRows=0\n count =0 \n for row in sheet1.iter_rows(min_row=2, max_col=5, max_row=sheet1.max_row ,values_only=True): \n try : \n if isna(row[1]) :\n continue\n if isna(row[1]):\n validated = False\n reason = 'old connection number is empty'\n writeDemandLog(logfile,getValue(row[0], int, ''),reason, getValue(row[1], str, ''))\n if not isna(row[2]):\n if getValue(row[2], float, 0) < 0.0 or not bool(re.match(\"^\\d*(\\.[0-9]{1,2})*$\",getValue(row[2], str, ''))):\n validated = False\n reason = 'water charge is not correct'\n writeDemandLog(logfile,getValue(row[0], int, ''),reason, getValue(row[1], str, ''))\n if not isna(row[3]):\n if getValue(row[3], float, 0) < 0.0 or not bool(re.match(\"^\\d*(\\.[0-9]{1,2})*$\",getValue(row[3], str, ''))):\n validated = False\n reason = 'advance is not correct'\n writeDemandLog(logfile,getValue(row[0], int, ''),reason, getValue(row[1], str, ''))\n except Exception as ex:\n print(config.CITY_NAME,\" validateDataForDemand Exception: \",getValue(row[0], str, ''), ' ',ex)\n traceback.print_exc()\n\n connection_ids = []\n for index in range(2, sheet1.max_row +1):\n try: \n if isna(sheet1['B{0}'.format(index)].value): \n break\n connectionId = getValue(sheet1['B{0}'.format(index)].value, str, '')\n connection_ids.append(connectionId)\n except Exception as ex:\n traceback.print_exc()\n print( config.CITY_NAME, \" validateDataForProperty Exception: abas id is empty: \",getValue(row[0], int, ''), ' ',ex)\n duplicate_ids = [item for item, count in collections.Counter(connection_ids).items() if count > 1]\n\n # if(len(duplicate_ids) >= 1):\n # validated = False\n # write(logfile,None,'Duplicate old connection id for '+ str(duplicate_ids)) \n except Exception as ex:\n traceback.print_exc()\n return validated\n\ndef ValidateCols(logfile, propertyFile, sheet1):\n proper_column_order1 = ['Sl No', 'Old Connection No','Water charge','Penalty','Advance']\n validated = True\n column_list = [c.value for c in next(sheet1.iter_rows(min_row=1, max_row=1))]\n try:\n for i in range(0, 4):\n if(proper_column_order1[i].strip() != column_list[i].strip()) :\n print('Demand file: ', column_list[i])\n validated = False\n write(logfile,propertyFile,sheet1.title,None,'Column order / name is not correct',column_list[i])\n # break\n\n except Exception as ex:\n validated = False\n print(config.CITY_NAME,\" validateCols Demand Exception: \",ex)\n traceback.print_exc()\n return validated \n\n\ndef createDemands(sheet1, cityname, logfile,root, name, countfile, connlogfile):\n\n createdCount = 0\n searchedCount = 0\n notCreatedCount = 0\n 
auth_token = superuser_login()[\"access_token\"]\n tenantId = 'pb.'+ cityname\n \n index = 2\n waterDemandsObj = [] \n demands = WaterDemands() \n for row in sheet1.iter_rows(min_row=2, max_col=6, max_row=sheet1.max_row ,values_only=True): \n waterDemand = WaterDemand() \n waterConnection = WaterConnection()\n index = index + 1 \n try:\n if isna(getValue(row[1], str, None)) :\n continue\n waterDemand.oldConnectionNo= getValue(row[1], str, '')\n status, res = waterConnection.search_water_connection(auth_token, tenantId, waterDemand.oldConnectionNo) \n if(len(res['WaterConnection']) > 0): \n # print(getValue(row[2], float, 0))\n if not (isna(row[2]) and isna(row[3]) and isna(row[4])) and not (getValue(row[2], int, 0) == 0 and getValue(row[3], int, 0) == 0 and getValue(row[4], int, 0) == 0) : \n waterDemand.waterCharge = getValue(row[2], float, 0)\n waterDemand.penalty = getValue(row[3], float, 0) \n waterDemand.advance = getValue(row[4], float, 0) \n waterDemandsObj.append(waterDemand)\n except Exception as ex:\n print(config.CITY_NAME,\" createDemands Exception: \",ex)\n traceback.print_exc() \n demands.waterDemands = waterDemandsObj\n statusCode, res = demands.upload_demand(auth_token, tenantId, demands, root, name)\n if len(res) > 0 :\n errorDemandConn = open(os.path.join(config.DEMAMD_ENTRY_ISSUES_FOLDER, name + \" Demand Entry Issues.xlsx\"), \"w\") \n for connection in res:\n writeErrorDemandConn(connlogfile, getValue(connection, str, ''))\n \n\n # with io.open(os.path.join(root, name,str(property.abasPropertyId) + \"_property_create_res.json\"), mode=\"w\", encoding=\"utf-8\") as f:\n # json.dump(res, f, indent=2, ensure_ascii=False)\n # if statusCode == 200 :\n # for found_index, resProperty in enumerate(res[\"Properties\"]):\n # propertyId = resProperty[\"propertyId\"] \n # createdCount = createdCount + 1\n # break\n # elif statusCode == 400 :\n # with io.open(os.path.join(root, name, \"failedConnections.json\"), mode=\"w\", encoding=\"utf-8\") as f:\n # json.dump(res, f, indent=2, ensure_ascii=False) \n # reason = 'Demand not created status code '+ str(statusCode) + ' for connection id ' + str(property.abasPropertyId) + ' response: ', str(res) + '\\n'\n # print(reason)\n # notCreatedCount = notCreatedCount + 1\n \n reason = 'Demand created count: '+ str(createdCount)\n print(reason)\n countfile.write(reason)\n countfile.write('\\n')\n reason = 'Demand not created count: '+ str(notCreatedCount)\n print(reason)\n countfile.write(reason)\n countfile.write('\\n')\n reason = 'Demand searched count: '+ str(searchedCount)\n print(reason)\n countfile.write(reason)\n countfile.write('\\n')\n\nif __name__ == \"__main__\":\n # print(USAGE_MAP)\n main()\n \n \n \n\n\n","sub_path":"CreateWaterLegacyDemand.py","file_name":"CreateWaterLegacyDemand.py","file_ext":"py","file_size_in_byte":15958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"315940373","text":"from collections import deque\n\nclass Solution(object):\n def addStrings(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n if len(num1) >= len(num2):\n num2 = \"0\"*(len(num1)-len(num2))+num2\n else:\n num1 = \"0\"*(len(num2)-len(num1))+num1\n ans = deque([])\n carry = 0\n for i in range(-1, -len(num1)-1, -1):\n val = ord(num1[i])+ord(num2[i])-2*ord('0')+carry\n carry = val//10\n ans.appendleft(val%10)\n if carry:\n ans.appendleft(carry)\n return \"\".join(map(str, ans))\n# Runtime: 20 ms, faster than 95.54% of Python online submissions for Add Strings.\n# Memory Usage: 13.9 MB, 
less than 8.63% of Python online submissions for Add Strings.\n\nclass Solution(object):\n def addStrings(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n if len(num1) >= len(num2):\n long = (len(num1), num1)\n short = (len(num2), num2)\n else: \n long = (len(num2), num2)\n short = (len(num1), num1)\n ans = 0\n for i in range(long[0]):\n a = ord(long[1][long[0]-1-i])-ord('0')\n b = ord(short[1][short[0]-1-i])-ord('0') if i < short[0] else 0\n ans += (a+b)*10**(i)\n return str(ans)","sub_path":"415. Add Strings.py","file_name":"415. Add Strings.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"297482553","text":"\"\"\"\nCopyright 2017 Steven Diamond\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport cvxpy.settings as s\nfrom cvxpy.problems.solvers.scs_intf import SCS\n\n\nclass SUPERSCS(SCS):\n \"\"\"An interface for the SuperSCS solver.\n \"\"\"\n\n def name(self):\n \"\"\"The name of the solver.\n \"\"\"\n return s.SUPERSCS\n\n def import_solver(self):\n \"\"\"Imports the solver.\n \"\"\"\n import superscs\n superscs # For flake8\n\n def solve(self, objective, constraints, cached_data,\n warm_start, verbose, solver_opts):\n \"\"\"Returns the result of the call to the solver.\n\n Parameters\n ----------\n objective : LinOp\n The canonicalized objective.\n constraints : list\n The list of canonicalized constraints.\n cached_data : dict\n A map of solver name to cached problem data.\n warm_start : bool\n Should the previous solver result be used to warm_start?\n verbose : bool\n Should the solver print output?\n solver_opts : dict\n Additional arguments for the solver.\n\n Returns\n -------\n tuple\n (status, optimal value, primal, equality dual, inequality dual)\n \"\"\"\n import superscs\n data = self.get_problem_data(objective,\n constraints,\n cached_data)\n # Set the options to be VERBOSE plus any user-specific options.\n solver_opts[\"verbose\"] = verbose\n scs_args = {\"c\": data[s.C], \"A\": data[s.A], \"b\": data[s.B]}\n # If warm_starting, add old primal and dual variables.\n solver_cache = cached_data[self.name()]\n if warm_start and solver_cache.prev_result is not None:\n scs_args[\"x\"] = solver_cache.prev_result[\"x\"]\n scs_args[\"y\"] = solver_cache.prev_result[\"y\"]\n scs_args[\"s\"] = solver_cache.prev_result[\"s\"]\n\n results_dict = superscs.solve(scs_args, data[s.DIMS], **solver_opts)\n return self.format_results(results_dict, data, cached_data)\n","sub_path":"cvxpy/problems/solvers/superscs_intf.py","file_name":"superscs_intf.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"202954063","text":"from datetime import time, datetime\n\nimport requests\nimport pytz\n\n\ndef load_attempts(url_api):\n page_number = 1\n while True:\n payload = {'page': page_number}\n response = requests.get(url_api, params=payload)\n records = 
response.json()['records']\n for record in records:\n yield record\n\n page_number += 1\n if page_number > response.json()['number_of_pages']:\n break\n\n\ndef get_midnighters(attempts):\n start_time = 0\n end_time = 6\n midnighters = set()\n for attempt in attempts:\n time_zone = pytz.timezone(attempt['timezone'])\n local_datetime = datetime.fromtimestamp(\n attempt['timestamp'],\n time_zone\n )\n if (end_time > local_datetime.time().hour >= start_time):\n midnighters.add(attempt['username'])\n return midnighters\n\n\ndef print_output(midnighters):\n print(\"Users, who have sent a solution after 00-00:\")\n for midnighter in midnighters:\n print(midnighter)\n\n\ndef main():\n url_api = 'https://devman.org/api/challenges/solution_attempts/'\n attempts = load_attempts(url_api)\n midnighters = get_midnighters(attempts)\n print_output(midnighters)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"seek_dev_nighters.py","file_name":"seek_dev_nighters.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"397199333","text":"import pandas as pd\nfrom kabutobashi.errors import StockDfError\n\n\nclass StockDf(object):\n \"\"\"\n Class used to hold various kinds of values.\n Validation is performed when a value is assigned.\n \"\"\"\n def __init__(self):\n \"\"\"\n \"\"\"\n self.name = None\n self.internal_name = None\n\n def __get__(self, instance, instance_type):\n return getattr(instance, self.internal_name, None)\n\n def __set__(self, instance, value):\n if value is None:\n raise StockDfError(\"required\")\n\n df_columns = value.columns\n if \"code\" in df_columns:\n code = list(set(value.code.values))\n if len(code) > 1:\n raise StockDfError(\"multiple code\")\n elif len(code) == 0:\n raise StockDfError(\"no code\")\n\n # look for a candidate date column\n date_column = None\n if \"date\" in df_columns:\n date_column = \"date\"\n elif \"dt\" in df_columns:\n date_column = \"dt\"\n if date_column is None:\n raise StockDfError(\"Neither of the date columns [dt, date] exists\")\n if \"date\" in df_columns and \"dt\" in df_columns:\n raise StockDfError(\"Only one of the date columns [dt, date] may exist\")\n\n # use the date as the index\n value.index = pd.to_datetime(value[date_column])\n\n # keep only the required columns\n value = value.loc[:, [\"open\", \"high\", \"low\", \"close\"]]\n open_s = value['open'].apply(self._replace_comma)\n close_s = value['close'].apply(self._replace_comma)\n high_s = value['high'].apply(self._replace_comma)\n low_s = value['low'].apply(self._replace_comma)\n new_value = pd.DataFrame({\"open\": open_s, \"high\": high_s, \"low\": low_s, \"close\": close_s})\n # set the typed value on the instance\n setattr(instance, self.internal_name, new_value)\n\n @staticmethod\n def _replace_comma(x) -> float:\n \"\"\"\n Removes the comma from a value inside pandas when the value contains one\n :param x:\n :return:\n \"\"\"\n if type(x) is str:\n x = x.replace(\",\", \"\")\n try:\n f = float(x)\n except ValueError as e:\n raise StockDfError(f\"Value cannot be converted to float. {e}\")\n return f\n","sub_path":"kabutobashi/attributes/stock_df.py","file_name":"stock_df.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"543676885","text":"from django.contrib.auth.models import AnonymousUser\n\ndef profile_from_request(request):\n \"\"\"\n Get the profile of the user in the request, or None if not logged in\n :param request: Http request\n :return: request.user's Profile or None\n \"\"\"\n if isinstance(request.user, AnonymousUser):\n return None\n else:\n return request.user.user_profile\n\n\ndef pkgen(length):\n 
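\"\"\"Generate a pseudo-random lowercase base32 key of the given length, retrying until it contains no rude substring.\"\"\"\n 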
from base64 import b32encode\n from hashlib import sha1\n from random import random\n rude = ('fuck', 'shit', 'damn', 'bitch', 'hell',)\n bad_pk = True\n while bad_pk:\n pk = b32encode(sha1(str(random()).encode()).digest()).decode().lower()[:length] # encode/decode so this also runs on Python 3\n bad_pk = False\n for rw in rude:\n if pk.find(rw) >= 0: bad_pk = True\n return pk","sub_path":"notes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"27305601","text":"class InvalidBedFormatException(Exception):\n def __init__(self, message, file, line):\n super().__init__(message)\n self.message = message\n self.file = file\n self.line = line\n\n def __str__(self):\n return \"InvalidBedFormatException: \" + self.message + \" \" + self.file + \".\\n\" + \"Line: \" + self.line.rstrip()\n\n\ndef __verify_file(filename):\n with open(filename) as file:\n first_line = file.readlines()[0]\n\n if len(first_line.split()) != 6:\n raise InvalidBedFormatException(\"Not all columns found in \", filename, first_line)\n\n chromosome, left, right, name, score, strand = first_line.split()\n\n try:\n left = int(left)\n except ValueError as e:\n raise InvalidBedFormatException(\"Left value is not an integer in \", filename, first_line)\n\n try:\n right = int(right)\n except ValueError as e:\n raise InvalidBedFormatException(\"Right value is not an integer in \", filename, first_line)\n\n if strand != \"+\" and strand != \"-\":\n raise InvalidBedFormatException(\"Strand is not + or - in \", filename, first_line)\n\n\ndef verify_bed_files(*filenames):\n \"\"\"\n Raises an InvalidBedFormatException error if one of the files is not in bed format (chromosome, left, right,\n name, score, strand)\n\n :param filenames: files to be verified\n :type filenames: str or list\n \"\"\"\n for filename in filenames:\n if isinstance(filename, list):\n for sub_filename in filename:\n __verify_file(sub_filename)\n else:\n __verify_file(filename)\n","sub_path":"version 2/utils/verify_bed_file.py","file_name":"verify_bed_file.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"652583340","text":"'''\n K out of K Visual Encryption Scheme\n'''\n\nfrom __future__ import print_function\nimport itertools\nimport numpy as np\nimport random\nimport time\n\n# K out of K scheme\n# constructs K shares from a black and white image\n\n'''\n makeW()\n\n generates the groundset W.\n'''\ndef makeW (k):\n W = []\n i = 1\n while (i <= k):\n W.append(i)\n i += 1\n return W\n\n# returns a tuple of pi,sigma as a list based on k\n# credit to: http://pythonfiddle.com/a-list-of-subsets-of-a-list/\n'''\n makePiSigma()\n \n generates the pi set (the even cardinality set)\n and the sigma set (the odd cardinality set) from\n the ground set.\n'''\ndef makePiSigma (W):\n pi = []\n sigma = []\n for i in range(0, len(W)+1):\n subset = [list(sub) for sub in itertools.combinations(W, i)]\n if (len(subset)%2 == 0):\n pi.extend(subset)\n else:\n sigma.extend(subset)\n pi.insert(0,0)\n print(pi)\n return([pi, sigma])\n\n'''\n search()\n \n search through the given\n set for the search target.\n'''\ndef search (target, search_space):\n if isinstance(search_space, int): \n val = target == search_space\n print(target)\n print(val)\n return val\n if target in search_space:\n print(\"Found in search! Returning True\")\n return True\n else:\n print(\"Not found! 
Returning False\")\n return False\n\n'''\n makeS()\n\n generates a S0 set.\n'''\ndef makeS (W, ps):\n #set size of s0 to be list of lists with sizes k and 2^(k-1)\n k = len(W)\n col = pow(2,(k-1))\n s = [[2 for x in range(col)] for y in range(k)]\n for i in range(len(s)):\n for j in range(len(s[i])):\n if ( search(W[i],ps[j]) ):\n s[i][j] = 0\n else:\n s[i][j] = 1\n return s\n\n'''\n permuteMatrix()\n\n @param matrix The matrix to be permuted\n @return the column permuted matrix\n'''\ndef permuteMatrix (matrix):\n matrix = np.array(matrix)\n cols = len(matrix[0])\n for i in range(0,cols):\n rand1 = random.randint(0,cols-1)\n rand2 = random.randint(0,cols-1)\n\n #taken out until such time that we decide it should go back in\n #made the results somewhat more regular but it was a performance overhead\n # while(rand1 == rand2 and cols<100):\n # rand1 = random.randint(0,cols-1)\n # rand2 = random.randint(0,cols-1)\n matrix[:,[rand1,rand2]] = matrix[:,[rand2,rand1]]\n return matrix\n\n'''\n koutofk()\n\n the main function for carrying out k out of k\n image secret splitting. \n'''\ndef koutofk (k, Matrix):\n W = makeW(k)\n fullset = makePiSigma(W)\n pi = fullset[0]\n sigma = fullset[1]\n # creates an S0 matrix such that S0 = S0[i,j] = 1 iff ei in pij\n s0 = makeS(W, pi)\n # creates an S1 matrix such that S1 = S1[i,j] = 1 iff e1 in sigmaj\n s1 = makeS(W, sigma)\n\n #TODO: will probably want to change this to binary writing to reduce size of the file\n # startTime = time.time()\n shares = [object] * k\n for i in range(0, k):\n shares[i] = open(\"share\" + str(i), \"w\")\n\n # print(\"Creating files took:\", time.time() - startTime)\n # convert a 2D array to k shares and write those shares to files\n # startTime = time.time()\n\n for line in Matrix:\n for pixel in line:\n # pixelTime = time.time()\n #choose a permutation randomly of either S0 or S1\n # matrixTime = time.time()\n if pixel == 1:\n out = permuteMatrix(s0) # white pixel\n else:\n out = permuteMatrix(s1) # black pixel\n # print(\"time to permutate:\", time.time() - matrixTime)\n # distribute the permutation among the shares\n for i in range(0, k):\n for subpixel in out[i]:\n shares[i].write(str(subpixel))\n # print(\"time for one pixel:\", time.time() - pixelTime)\n for i in range(0, k):\n shares[i].write(\"\\n\")\n for i in range(0, k):\n shares[i].close()\n\n # print(\"Creating shares took:\", time.time() - startTime)\n\n return 0\n\n'''\n koutofk_to3D_Matrix()\n'''\ndef koutofk_to3D_Matrix(k, Matrix):\n if k % 2 == 0:\n print(\"Invalid k:\",k)\n return [[]]\n W = makeW(k)\n fullset = makePiSigma(W)\n pi = fullset[0]\n sigma = fullset[1]\n # creates an S0 matrix such that S0 = S0[i,j] = 1 iff ei in pij\n s0 = makeS(W, pi)\n # creates an S1 matrix such that S1 = S1[i,j] = 1 iff e1 in sigmaj\n s1 = makeS(W, sigma)\n\n # startTime = time.time()\n side_len = 2 << int(((k-1)/2) - 1) #(2^(k-1))/2 is the size of a side of a pixel in subpixels\n #print(side_len)\n pixels = 0\n for pixel in Matrix[0]:\n pixels += 1\n matrix_width = side_len * pixels #side_length * k^(k-1) * width of picture\n lines = 0\n for line in Matrix:\n lines += 1\n matrix_depth = side_len * lines\n outMatrix = np.zeros((k, matrix_depth, matrix_width), dtype=np.uint8)\n doffset = 0\n for line in Matrix:\n woffset = 0\n for pixel in line:\n # pixelTime = time.time()\n #choose a permutation randomly of either S0 or S1\n # matrixTime = time.time()\n if pixel == 1:\n out = permuteMatrix(s0) # white pixel\n else:\n out = permuteMatrix(s1) # black pixel\n # print(\"time to 
permutate:\", time.time() - matrixTime)\n #distribute the permutation among the shares\n for i in range(0, k):\n pos = 0\n for depth in range(doffset, doffset + side_len):\n for width in range(woffset, woffset + side_len):\n #print(\"i, woffset, depth, width, subpixel:\",i,woffset,depth,width,out[i][pos])\n outMatrix[i][depth][width] = out[i][pos]\n pos += 1\n woffset += side_len\n # print(\"time for one pixel:\", time.time() - pixelTime)\n doffset += side_len\n\n # print(\"Creating shares took:\", time.time() - startTime)\n\n return outMatrix\n\n'''\n toImage()\n'''\ndef toImage(k):\n #TODO: eventually shares will come as arguments\n # startTime = time.time()\n share = open(\"share0\", \"r\")\n num_lines = sum(1 for line in share) #assume the files are the same sizes (should be anyway)\n share.close()\n shares = [object] * k\n for i in range(0, k):\n shares[i] = open(\"share\" + str(i), \"r\")\n\n #compute the length of a individual pixel's share\n length = 2 << (k-2) #same as 2^(k-1)\n # print(\"num_lines\", num_lines)\n num_pixels = len(shares[0].readline())/length\n shares[0].seek(0,0)\n Matrix = np.zeros((num_lines, num_pixels), dtype=np.uint8)\n for i in range(0, num_lines):\n lines = [object] * k\n for x in range(0, k):\n lines[x] = shares[x].readline()\n lines[x] = lines[x][:-1] #slice off the newline character of the line\n\n beg = 0 #The first digit of a share\n\n while beg < len(lines[0]):\n white=False\n for x in range(beg, beg + length):\n #if there's a single matching of all white subpixels than the pixel must be white\n w = True\n for line in lines:\n if line[x] != \"1\":\n w = False\n if w:\n white=True\n #print results out to console\n #Prints out in as 0 or 1 in the place where that pixel would be in the image\n #ie\n # 100\n # 011\n # 101\n #for a image that is 3x3 pixels\n #TODO: for generating the image, what is the proper format? 
-> 2D array\n # print(i, beg//length, end=\"\")\n if(white):\n Matrix[i][beg//length] = 1\n # print(\"-> 0\")\n else:\n Matrix[i][beg//length] = 0\n # print(\"-> 1\")\n beg += length\n # print(\"Shares -> pixels took:\", time.time() - startTime)\n return Matrix\n\n'''\n stack_images()\n'''\ndef stack_images(Images):\n #set up the output Matrix which will be the dimensions of the original matrix\n num_pixels = sum(1 for subpixel in Images[0][0])\n num_lines = sum(1 for subpixel in Images[0])\n outMatrix = np.zeros((num_lines, num_pixels), dtype=np.uint8)\n for line in range(0, num_lines):\n for pixel in range(0, num_pixels):\n white = True\n for image in Images:\n if image[line][pixel] == 0:\n white = False\n if white:\n outMatrix[line][pixel] = 1\n else:\n outMatrix[line][pixel] = 0\n return outMatrix\n\n \n'''\n toImage_fr3D()\n'''\ndef toImage_fr3D(k, Matrix):\n #set up the output Matrix which will be the dimensions of the original matrix\n subpixels = 2 << int(((k-1) /2) - 1)\n num_pixels = sum(1 for subpixel in Matrix[0][0]) // subpixels\n num_lines = sum(1 for subpixel in Matrix[0]) // subpixels\n outMatrix = np.zeros((num_lines, num_pixels), dtype=np.uint8)\n #Calculate the value of each pixel by combining squares of subpixels between the shares\n for i in range(num_lines):\n for j in range(num_pixels):\n white = False\n for depth in range(i*subpixels, (i+1)*subpixels):\n for width in range(j*subpixels, (j+1)*subpixels):\n w = True\n for share in Matrix:\n # Can be used with 0 or 1 or with 0 or 255 images\n if share[depth][width] == 0:\n w = False\n if w:\n white = True\n if white:\n outMatrix[i][j] = 1\n else:\n outMatrix[i][j] = 0\n return outMatrix\n\n","sub_path":"VSS-Encryption-master/docs/kofk.py","file_name":"kofk.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"328138221","text":"import os, sys\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt, colorbar\nfrom typing import Tuple, List\nfrom numbers import Number\nimport ensembler.potentials.TwoD as pot2D\n\n\nSMALL_SIZE = 8\nMEDIUM_SIZE = 10\nBIGGER_SIZE = 12\n\ncmap = \"tab20b\"\n\nplt.rc('font', size=SMALL_SIZE) # controls default text sizes\nplt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\nplt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\nplt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\nplt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\nplt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n\n\nfrom ensembler.potentials import OneD as pot, ND as nDPot\nfrom ensembler.potentials._baseclasses import _potential1DCls, _perturbedPotentialNDCls\n\n#UTIL FUNCTIONS\ndef significant_decimals(s:float)->float:\n significant_decimal=2\n if(s % 1 != 0):\n decimals = str(float(s)).split(\".\")[-1]\n for digit in decimals:\n if(digit == \"0\"):\n significant_decimal +=1\n else:\n return round(s, significant_decimal)\n else:\n return s\n\ndef plot_1DPotential(potential: _potential1DCls, positions:list,\n x_range=None, y_range=None, title:str=None, ax=None):\n # generate Data\n energies = potential.ene(positions=positions)\n\n # is there already a figure?\n if (ax == None):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = None\n\n # plot\n ax.plot(positions, energies)\n ax.set_xlim(min(x_range), max(x_range)) if (x_range!=None) else 
ax.set_xlim(min(positions), max(positions))\n ax.set_ylim(min(y_range), max(y_range)) if (y_range!=None) else ax.set_ylim(min(energies), max(energies))\n\n ax.set_xlabel('$x$')\n ax.set_ylabel('$Potential [kj]$')\n ax.set_title(title) if (title != None) else ax.set_title(\"Potential \"+str(potential.name))\n\n if(fig != None):\n return fig, ax\n else:\n return ax\n pass\n\ndef plot_1DPotential_dhdpos(potential: _potential1DCls, positions:list,\n x_range=None, y_range=None, title:str=None, ax=None):\n # generate Data\n energies = potential.dvdpos(positions=positions)\n\n # is there already a figure?\n if (ax == None):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = None\n\n # plot\n ax.plot(positions, energies)\n ax.set_xlim(min(x_range), max(x_range)) if (x_range!=None) else ax.set_xlim(min(positions), max(positions))\n ax.set_ylim(min(y_range), max(y_range)) if (y_range!=None) else ax.set_ylim(min(energies), max(energies))\n\n ax.set_xlabel('$x$')\n ax.set_ylabel('$Potential [kj]$')\n ax.set_title(title) if (title != None) else ax.set_title(\"Potential \"+str(potential.name))\n\n if(fig != None):\n return fig, ax\n else:\n return ax\n pass\n\n\ndef plot_1DPotential_Term(potential:_potential1DCls, positions: list,\n x_range=None, y_range=None, title: str = None, ax=None):\n fig, axes = plt.subplots(nrows=1, ncols=2)\n plot_1DPotential(potential=potential, positions=positions, ax=axes[0], x_range=x_range, y_range=y_range, title=\"Pot\")\n plot_1DPotential_dhdpos(potential=potential, positions=positions, ax=axes[1], x_range=x_range, y_range=y_range, title=\"dhdpos\")\n fig.tight_layout()\n fig.suptitle(title) if(title!=None) else fig.suptitle(\"Potential \"+str(potential.name))\n return fig, axes\n\ndef plot_1DPotential_Termoverlay(potential: _potential1DCls, positions:list,\n x_range=None, y_range=None, title: str = None, ax=None):\n #generate data\n energies = potential.ene(positions=positions)\n dVdpos = potential.dhdpos(positions=positions)\n\n # is there already a figure?\n if (ax == None):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = None\n\n ax.plot(positions, energies, label=\"V\")\n ax.plot(positions, list(map(abs, dVdpos)), label=\"absdVdpos\")\n ax.plot(positions, dVdpos, label=\"dVdpos\")\n ax.set_xlim(min(x_range), max(x_range)) if (x_range!=None) else ax.set_xlim(min(positions), max(positions))\n ax.set_ylim(min(y_range), max(y_range)) if (y_range!=None) else ax.set_ylim(min([min(energies), min(dVdpos)]), max([max(energies), max(dVdpos)]))\n\n ax.set_ylabel(\"$Potential/kJ$\")\n ax.set_xlabel(\"$x$\")\n ax.legend()\n ax.set_title(title) if (title != None) else ax.set_title(\"Potential \"+str(potential.name))\n\n if(fig != None):\n return fig, ax\n else:\n return ax\n\n\n\n\"\"\"\n 2D Plotting Functions\n\"\"\"\ndef plot_2DPotential(V:pot2D._potential2DClsSymPY, positions2D:List[Tuple[Number,Number]]=None, title:str=None, x_label:str=None, y_label:str=None, space_range:Tuple[Number, Number]=(-10, 10), point_resolution:int=1000, ax=None, show_plot:bool=False, dpi:int=300)->(plt.Figure, plt.Axes, np.array):\n #build positions\n if(isinstance(positions2D, type(None))):\n minX, maxX = min(space_range), max(space_range)\n minY, maxY = min(space_range), max(space_range)\n positions = np.linspace(min(space_range), max(space_range), point_resolution)\n x_positions, y_positions = np.meshgrid(positions,positions)\n positions2D = np.array([x_positions.flatten(), y_positions.flatten()]).T\n else:\n positions2D = np.array(positions2D)\n minX, maxX = 
min(positions2D[:,0]), max(positions2D[:,0])\n minY, maxY = min(positions2D[:,1]), max(positions2D[:,1]) \n\n #landscapes\n V_pots = V.ene(positions2D)\n minV,maxV = np.min(V_pots), np.max(V_pots)\n V_land = V_pots.reshape([point_resolution,point_resolution])\n\n #make Figure\n if(isinstance(ax, type(None))):\n fig, ax = plt.subplots(ncols=1, dpi=dpi)\n else:\n fig = None\n\n surf = ax.imshow(V_land, cmap=\"tab20b\", extent=[minX, maxX, minY, maxY])\n ax.set_xlabel(\"x\")\n\n \n if(isinstance(x_label, type(None))):\n ax.set_xlabel(\"x\")\n else:\n ax.set_xlabel(x_label)\n\n \n if(isinstance(y_label, type(None))):\n ax.set_ylabel(\"y\")\n else:\n ax.set_ylabel(y_label)\n\n\n ax.set_xticks(np.linspace(minX, maxX+1, 5))\n ax.set_yticks(np.linspace(minY, maxY+1, 5))\n\n\n if(isinstance(title, type(None))):\n ax.set_title(\"Potential Landscape\")\n else:\n ax.set_title(title)\n\n #color bar:\n if(not isinstance(fig, type(None))):\n cbaxes = fig.add_axes([0.9, 0.1, 0.03, 0.8]) \n cb = plt.colorbar(surf, fraction=0.046, pad=0.04, cax = cbaxes, ticks=list(np.round(np.linspace(minV,maxV,5),2)))\n cb.set_label(\"V/[kT]\")\n\n fig.tight_layout()\n \n if(show_plot):\n fig.show()\n\n return fig, ax, surf\n\n\ndef plot_2DEnergy_landscape(potential1: _potential1DCls, potential2: _potential1DCls, positions1:list, positions2:list=None,\n x_range=None, y_range=None, z_range=None, title:str=None, colbar:bool=False, ax=None, cmap:str=\"inferno\"):\n #generate Data\n energy_map = []\n min_E, max_E = 0,0\n\n if(type(positions2)==type(None)):\n positions2 = positions1\n\n for pos in positions2:\n Va = potential2.ene(pos)[0]\n Vb = potential1.ene(positions1)\n Vtot = list(map(lambda x: x+Va, Vb))\n energy_map.append(Vtot)\n\n if(min(Vtot)<min_E):\n min_E = min(Vtot)\n if(max(Vtot)>max_E):\n max_E = max(Vtot)\n\n energy_map = np.array(energy_map)\n\n #is there already a figure?\n if(ax == None):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n colbar=True\n else:\n fig = None\n\n if(z_range==None):\n z_range = [min_E, max_E]\n\n #plot\n surf = ax.imshow(energy_map, cmap=cmap, interpolation=\"nearest\",\n origin='center', extent=[min(positions1), max(positions1), min(positions2), max(positions2)], vmax=max(z_range), vmin=min(z_range), aspect=\"auto\")\n\n if(colbar and fig != None):\n fig.colorbar(surf, aspect=5, label='Energy/kJ')\n\n if(x_range): ax.set_xlim(min(x_range), max(x_range))\n if(y_range): ax.set_ylim(min(y_range), max(y_range))\n\n ax.set_xlabel('$x1$')\n ax.set_ylabel('$x2$')\n if(title): ax.set_title(title)\n return fig, ax, surf\n\n\"\"\"\n MultiState Plotting Functions\n\"\"\"\n#1D\n\ndef plot_2perturbedEnergy_landscape(potential:_perturbedPotentialNDCls, positions:list, lambdas:list,\n x_range=None, lam_range=None, title:str=None, colbar:bool=False, ax=None):\n\n energy_map_lin = []\n for y in lambdas:\n potential.set_lam(y)\n energy_map_lin.append(potential.ene(positions))\n energy_map_lin = np.array(energy_map_lin)\n\n if(ax == None):\n fig = plt.figure(figsize=(15,5))\n ax = fig.add_subplot(111)\n colbar=True\n else:\n fig = None\n\n surf = ax.imshow(energy_map_lin, cmap=\"viridis\", interpolation=\"nearest\",\n origin='center', extent=[min(positions), max(positions), min(lambdas), max(lambdas)], vmax=100, vmin=0, aspect=\"auto\")\n\n if(colbar):\n colorbar.Colorbar(ax, surf, label='Energy')\n\n if(x_range): ax.set_xlim(min(x_range), max(x_range))\n if(lam_range): ax.set_ylim(min(lam_range), max(lam_range))\n ax.set_xlabel('x')\n ax.set_ylabel('$\\lambda$')\n if(title): ax.set_title(title)\n return fig, ax, surf\n\n#show feature 
landscape per s\ndef envPot_differentS_overlay_min0_plot(eds_potential:nDPot.envelopedPotential, s_values:list, positions:list,\n y_range:tuple=None, hide_legend:bool=False, title:str=None, out_path:str=None):\n #generate energy values\n ys = []\n scale = 1 # 0.1\n for s in s_values:\n eds_potential.s=s\n enes = eds_potential.ene(positions)\n y_min =min(enes)\n y=list(map(lambda z: (z-y_min)*scale, enes))\n ys.append(y)\n\n #plotting\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(20,10))\n for s, y in reversed(list(zip(s_values, ys))):\n axes.plot(positions, y, label=\"s_\"+str(significant_decimals(s)))\n\n if (y_range != None):\n axes.set_ylim(y_range)\n axes.set_xlim(min(positions),max(positions))\n\n #styling\n axes.set_ylabel(\"Vr/[kJ]\")\n axes.set_xlabel(\"r\")\n axes.set_title(\"different Vrs aligned at 0 with different s-values overlayed \")\n\n ##optionals\n if(not hide_legend): axes.legend()\n if(title): fig.suptitle(title)\n if(out_path): fig.savefig(out_path)\n fig.show()\n\n return fig, axes\n\n#show feature landscape per s\ndef envPot_differentS_overlay_plot(eds_potential:nDPot.envelopedPotential, s_values:list, positions:list,\n y_range:tuple=None, hide_legend:bool=False, title:str=None, out_path:str=None, axes=None):\n #generate energy values\n ys = []\n for s in s_values:\n eds_potential.s=s\n enes = eds_potential.ene(positions)\n ys.append(enes)\n\n #plotting\n if(axes == None):\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(20,10))\n else:\n fig = None\n\n for s, y in reversed(list(zip(s_values, ys))):\n axes.plot(positions, y, label=\"s_\"+str(significant_decimals(s)))\n\n #styling\n axes.set_xlim(min(positions),max(positions))\n axes.set_ylabel(\"Vr/[kJ]\")\n axes.set_xlabel(\"r\")\n if(title ==None):\n axes.set_title(\"different $V_{r}$s with different s-values overlayed \")\n else:\n axes.set_title(title)\n\n\n ##optionals\n if (y_range != None): axes.set_ylim(y_range)\n if(not hide_legend): axes.legend()\n if(title and not isinstance(fig, type(None))): fig.suptitle(title)\n if(out_path and not isinstance(fig, type(None))): fig.savefig(out_path)\n if(not isinstance(fig, type(None))): fig.show()\n\n return fig, axes\n\ndef envPot_diffS_compare(eds_potential:nDPot.envelopedPotential, s_values:list, positions:list,\n y_range:tuple=None,title:str=None, out_path:str=None):\n ##row/column ratio\n per_row =4\n n_rows = (len(s_values)//per_row)+1 if ((len(s_values)%per_row)>0) else (len(s_values)//per_row)\n\n ##plot\n fig, axes = plt.subplots(nrows=n_rows, ncols=per_row, figsize=(20,10))\n axes = [ax for ax_row in axes for ax in ax_row]\n\n for ax, s in zip( axes, s_values):\n eds_potential.s=s\n y=eds_potential.ene(positions)\n ax.plot(positions, y)\n\n #styling\n ax.set_xlim(min(positions), max(positions))\n ax.set_title(\"s_\"+str(significant_decimals(s)))\n ax.set_ylabel(\"Vr/[kJ]\")\n ax.set_xlabel(\"r\")\n if (y_range != None): ax.set_ylim(y_range)\n\n ##optionals\n if(title): fig.suptitle(title)\n if(out_path): fig.savefig(out_path)\n fig.show()\n return fig, axes\n\ndef plot_envelopedPotential_system(eds_potential:nDPot.envelopedPotential, positions:list, s_value:float=None, Eoffi:list=None,\n y_range:tuple=None,title:str=None, out_path:str=None):\n if(s_value!=None):\n eds_potential.s = s_value #set new s\n if(Eoffi!=None):\n if(len(Eoffi) == len(eds_potential.V_is)):\n eds_potential.Eoff_i = Eoffi\n else:\n raise IOError(\"There are \"+str(len(eds_potential.V_is))+\" states and \"+str(Eoffi)+\", but the numbers have to be equal!\")\n\n ##calc 
energies\n energy_Vr = eds_potential.ene(positions)\n energy_Vis = [state.ene(positions) for state in eds_potential.V_is]\n num_states = len(eds_potential.V_is)\n\n ##plot nicely\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))\n axes = [ax for ax_row in axes for ax in ax_row]\n y_values = energy_Vis + [energy_Vr]\n labels = [\"state_\"+str(ind) for ind in range(1,len(energy_Vis)+1)]+[\"refState\"]\n\n for ax, y, label in zip(axes, y_values, labels):\n ax.plot(positions, y)\n ax.set_xlim(min(positions), max(positions))\n ax.set_ylim(y_range)\n ax.set_title(label)\n ax.set_ylabel(\"Vr/[kJ]\")\n ax.set_xlabel(\"r_\"+label)\n\n ##optionals\n if(title): fig.suptitle(title)\n if(out_path): fig.savefig(out_path)\n fig.show()\n return fig, axes\n\ndef plot_envelopedPotential_2State_System(eds_potential: nDPot.envelopedPotential, positions:list, s_value:float=None, Eoffi:list=None,\n title:str=None, out_path:str=None, V_max:float=600, V_min:float=None):\n\n if(len(eds_potential.V_is)>2):\n raise IOError(__name__+\" can only be used with two states in the potential!\")\n\n if(s_value!=None):\n eds_potential.s = s_value\n\n if (Eoffi != None):\n if (len(Eoffi) == len(eds_potential.V_is)):\n eds_potential.Eoff_i = Eoffi\n else:\n raise IOError(\"There are \" + str(len(eds_potential.V_is)) + \" states and \" + str(\n Eoffi) + \", but the numbers have to be equal!\")\n\n #Calculate energies\n energy_Vr = eds_potential.ene(positions)\n energy_Vis = [state.ene(positions) for state in eds_potential.V_is]\n energy_map = []\n min_e = 0\n\n for x in positions:\n row = eds_potential.ene(list(map(lambda y:[[x], [y]], list(positions))))\n row_cut = list(map(lambda x: V_max if(V_max != None and float(x) > V_max) else float(x), row))\n energy_map.append(row_cut)\n if(min(row)< min_e):\n min_e=min(row)\n\n if(V_min==None):\n V_min=min_e\n\n ##plot nicely\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))\n axes = [ax for ax_row in axes for ax in ax_row]\n y_values = energy_Vis + [energy_Vr]\n labels = [\"State_\" + str(ind) for ind in range(1, len(energy_Vis) + 1)] + [\"State_R\"]\n\n #plot the line potentials\n colors = [\"steelblue\", \"orange\", \"forestgreen\"]\n for ax, y, label,c in zip(axes, y_values, labels,colors):\n ax.plot(positions, y, c)\n ax.set_xlim(min(positions), max(positions))\n ax.set_ylim([V_min, V_max])\n ax.set_title(\"Potential $\"+label+\"$\")\n ax.set_ylabel(\"$V/[kJ]$\")\n ax.set_xlabel(\"$r_{ \" + label+\"} $\")\n\n #plot phase space surface\n ax = axes[-1]\n surf = ax.imshow(energy_map, cmap=\"inferno\", interpolation=\"nearest\",\n origin='center', extent=[min(positions), max(positions), min(positions), max(positions)],\n vmax=V_max, vmin=V_min)\n ax.set_xlabel(\"$r_{\"+labels[0]+\"}$\")\n ax.set_ylabel(\"$r_{\"+labels[1]+\"}$\")\n ax.set_title(\"complete phaseSpace of $state_R$\")\n #fig.colorbar(surf, aspect=5, label='Energy/kJ')\n\n ##optionals\n if(title): fig.suptitle(title)\n if(out_path): fig.savefig(out_path)\n fig.show()\n return fig, axes\n\n\ndef envPot_diffS_2stateMap_compare(eds_potential: pot.envelopedPotential, s_values: list, positions: list,\n V_max: float = 500, V_min: float = None, title: str = None, out_path: str = None):\n ##row/column ratio\n per_row = 4\n n_rows = (len(s_values) // per_row) + 1 if ((len(s_values) % per_row) > 0) else (len(s_values) // per_row)\n\n ##plot\n fig, axes = plt.subplots(nrows=n_rows, ncols=per_row, figsize=(20, 10))\n axes = [ax for ax_row in axes for ax in ax_row]\n first = True\n\n for ax, s in zip(axes, 
s_values):\n eds_potential.s = s\n min_e = 0\n energy_map = []\n for x in positions:\n row = eds_potential.ene(list(map(lambda y: [[x], [y]], list(positions))))\n row_cut = list(map(lambda x: V_max if (V_max != None and float(x) > V_max) else float(x), row))\n energy_map.append(row_cut)\n if (min(row) < min_e):\n min_e = min(row)\n\n if (V_min == None and first):\n V_min = min_e\n first = False\n print(\"emin: \", min_e)\n\n # plot phase space surface\n surf = ax.imshow(energy_map, cmap=\"viridis\", interpolation=\"nearest\",\n origin='center', extent=[min(positions), max(positions), min(positions), max(positions)],\n vmax=V_max, vmin=V_min)\n ax.set_xlabel(\"$r_1$\")\n ax.set_ylabel(\"$r_2$\")\n ax.set_title(\"complete phaseSpace of $state_R$\")\n fig.colorbar(surf, aspect=10, label='Energy/kJ')\n\n ##optionals\n if (title): fig.suptitle(title)\n if (out_path): fig.savefig(out_path)\n fig.show()\n\n return fig, axes\n\n\n#2D\n\n\n\"\"\"\n Wrappers for special Cases\n\"\"\"\ndef plot_2D_2states(V1, V2, space_range:Tuple[Number, Number]=None):\n fig, axes = plt.subplots(ncols=2, figsize=[15,10])\n _, ax1, surf1 = plot_2DPotential(V1, ax=axes[0], title=\"State 1\", x_label=\"$\\phi/[^{\\circ}]$\", y_label=\"$\\psi/[^{\\circ}]$\", space_range=space_range)\n _, ax2, surf2 = plot_2DPotential(V2, ax=axes[1], title=\"State 2\", x_label=\"$\\phi/[^{\\circ}]$\", y_label=\"$\\psi/[^{\\circ}]$\", space_range=space_range)\n\n #color bar:\n cbaxes = fig.add_axes([ax2.get_position().x1*1.15, ax2.get_position().y0, 0.03, ax2.get_position().height]) \n cb = plt.colorbar(surf2 , cax = cbaxes, ticks=list(np.round(np.linspace(np.min(surf1._A),np.max(surf1._A),5),2)),)\n cb.set_label(\"V/[kT]\")\n fig.tight_layout()\n\n fig.suptitle(\"The Two End States for EDS Potential\", y=0.9)\n\n return fig\n\ndef plot_2D_2State_EDS_potential(eds_pot, out_path:str=None, traj=None, s=100, positions2D=None, space_range=[-180, 180], point_resolution=500, x_label=\"$\\phi/[^{\\circ}$]\", y_label=\"$\\psi/[^{\\circ}$]\", verbose=False):\n traj_color = \"orange\"\n \n #build positions\n if(isinstance(positions2D, type(None))):\n minX, maxX = min(space_range), max(space_range)\n minY, maxY = min(space_range), max(space_range)\n positions = np.linspace(min(space_range), max(space_range), point_resolution)\n x_positions, y_positions = np.meshgrid(positions,positions)\n positions2D = np.array([x_positions.flatten(), y_positions.flatten()]).T\n else:\n positions2D = np.array(positions2D)\n point_resolution=len(np.unique(positions2D[:,0]))\n minX, maxX = min(positions2D[:,0]), max(positions2D[:,0])\n minY, maxY = min(positions2D[:,1]), max(positions2D[:,1]) \n\n #calc energies for total space\n #subPotentials\n eds_pot.s = s\n V1 = eds_pot.V_is[0]\n V2 = eds_pot.V_is[1]\n #Energies\n energies1 = V1.ene(positions2D)\n energies2 = V2.ene(positions2D)\n energiesEds = eds_pot.ene(positions2D)\n\n #generate map for 2D\n if(verbose): print(\"map data\")\n energies1Map = energies1.reshape([point_resolution, point_resolution])\n energies2Map = energies2.reshape([point_resolution, point_resolution])\n energiesEdsMap = energiesEds.reshape([point_resolution, point_resolution])\n\n #plotting\n if(verbose): print(\"plot\")\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=[15,6], dpi=300)\n\n minV,maxV = np.min(energies1Map), np.max(energies1Map)\n surf1 = ax1.imshow(energies1Map, cmap=cmap, interpolation=\"nearest\", origin='center', vmax=maxV, vmin=minV, extent=[minX, maxX, minY, maxY])\n surf2 = ax2.imshow(energies2Map, 
cmap=cmap, interpolation=\"nearest\", origin='center', vmax=maxV, vmin=minV, extent=[minX, maxX, minY, maxY]) \n\n minV,maxV = np.min(energies1), np.max(energies1)\n surf3 = ax3.imshow(energiesEdsMap, cmap=cmap, interpolation=\"nearest\", origin='center', vmax=maxV, vmin=minV, extent=[minX, maxX, minY, maxY])\n \n #color bar:\n cbaxes = fig.add_axes([1.0, 0.1, 0.03, 0.8]) \n cb = plt.colorbar(surf3 ,fraction=0.046, pad=0.04, cax = cbaxes, ticks=list(np.round(np.linspace(minV,maxV,5),2)))\n cb.set_label(\"V/[kT]\")\n \n ##LAEBELLING FUN\n ax1.set_ylim([min(space_range),max(space_range)])\n ax2.set_ylim([min(space_range),max(space_range)])\n ax3.set_ylim([min(space_range),max(space_range)])\n \n ax1.set_xlim([min(space_range),max(space_range)])\n ax2.set_xlim([min(space_range),max(space_range)])\n ax3.set_xlim([min(space_range),max(space_range)])\n \n ax1.set_ylabel(y_label, fontsize=18)\n \n ax1.set_xlabel(x_label, fontsize=18)\n ax2.set_xlabel(x_label, fontsize=18)\n ax3.set_xlabel(x_label, fontsize=18)\n\n \n ax1.set_yticks([-180, -90, 0, 90, 180])\n ax2.set_yticks([])\n ax3.set_yticks([])\n ax1.set_xticks([-180, -90, 0, 90, 180])\n ax2.set_xticks([-180, -90, 0, 90, 180])\n ax3.set_xticks([-180, -90, 0, 90, 180])\n \n ax1.tick_params(labelsize=14)\n ax2.tick_params(labelsize=14)\n ax3.tick_params(labelsize=14)\n \n #put TRAJ in to landscape\n if(not isinstance(traj, type(None))):\n visited_positions = traj.position.values\n vis_pos_x, vis_pos_y = np.array([state_positions[0] for state_positions in traj.position]).T\n\n ax1.scatter(vis_pos_x, vis_pos_y, c=traj_color, alpha=0.3)\n ax2.scatter(vis_pos_x, vis_pos_y, c=traj_color, alpha=0.3)\n ax3.scatter(vis_pos_x, vis_pos_y, c=traj_color, alpha=0.3)\n\n ax1.scatter(list(traj.position)[-1][0][0], list(traj.position)[-1][0][1], c=\"r\")\n ax2.scatter(list(traj.position)[-1][0][0], list(traj.position)[-1][0][1], c=\"r\")\n ax3.scatter(list(traj.position)[-1][0][0], list(traj.position)[-1][0][1], c=\"r\")\n\n ax1.scatter(list(traj.position)[0][0][0], list(traj.position)[0][0][0], c=\"g\")\n ax2.scatter(list(traj.position)[0][0][0], list(traj.position)[0][0][0], c=\"g\")\n ax3.scatter(list(traj.position)[0][0][0], list(traj.position)[0][0][0], c=\"g\")\n\n ax1.set_title(\"State 0\", fontsize=20)\n ax2.set_title(\"State 1\", fontsize=20)\n ax3.set_title(\"$s=\"+str(eds_pot.s)+\"$\", fontsize=16)\n fig.suptitle(\"EDS potential: s=\"+str(eds_pot.s))\n\n if(isinstance(out_path, type(None))):\n return fig\n else:\n fig.savefig(out_path, bbox_inches='tight')\n plt.close(fig)\n return out_path\n\n\ndef plot_2D_2State_EDS_potential_sDependency(sVal_traj_Dict:(dict, List), eds_pot, out_path:str=None, plot_trajs=False, space_range=[-180,180], point_resolution=500, positions2D=None, x_label=\"$\\phi/[^{\\circ}$]\", y_label=\"$\\psi/[^{\\circ}$]\", verbose=False):\n \n \n cmap = \"tab20b\"\n traj_color = \"orange\"\n ##positions\n #build positions\n if(isinstance(positions2D, type(None))):\n minX, maxX = min(space_range), max(space_range)\n minY, maxY = min(space_range), max(space_range)\n positions = np.linspace(min(space_range), max(space_range), point_resolution)\n x_positions, y_positions = np.meshgrid(positions,positions)\n positions2D = np.array([x_positions.flatten(), y_positions.flatten()]).T\n else:\n positions2D = np.array(positions2D)\n point_resolution=len(np.unique(positions2D[:,0]))\n minX, maxX = min(positions2D[:,0]), max(positions2D[:,0])\n minY, maxY = min(positions2D[:,1]), max(positions2D[:,1]) \n\n #V1, V2 = eds_pot.V_is\n if(verbose): 
print(\"calc tot space\")\n (V1, V2) = eds_pot.V_is\n energies1 = V1.ene(positions2D)\n energies2 = V2.ene(positions2D)\n\n #map data\n if(verbose): print(\"map data\")\n energies1Map = energies1.reshape([point_resolution, point_resolution])\n energies2Map = energies2.reshape([point_resolution, point_resolution])\n energyMaps = [energies1Map, energies2Map, []]\n\n relative_barrier = round(np.max(energies1Map)-np.min(energies1Map), 2) \n minV,maxV = min(energies1), min(energies1)+relative_barrier\n \n if(verbose): print(\"plot\") \n # gridspec inside gridspec\n \n nrows = len(sVal_traj_Dict)\n ncols = 3 # 3 states in the system\n \n fig = plt.figure(figsize=(7, 21), constrained_layout=False, dpi=300)\n outer_grid = fig.add_gridspec(nrows, ncols, wspace=0.1, hspace=0.1)\n for row, s in zip(range(nrows), sVal_traj_Dict):\n if(verbose): print(s)\n\n #eds pot energies\n eds_pot.s = s\n energiesEds= eds_pot.ene(positions2D)\n energiesEdsMap = energiesEds.reshape([point_resolution, point_resolution])\n energyMaps[-1] = energiesEdsMap\n \n eminV,emaxV = np.min(energiesEdsMap), np.max(energiesEdsMap)\n if(verbose): print(\"EDS - Barrier: \", emaxV-eminV)\n \n if(plot_trajs and isinstance(sVal_traj_Dict, type(dict))):\n tmp_visit_x, tmp_visit_y = np.array([state_positions[0] for state_positions in s_val_posDict[s].position]).T\n \n #plot landscapes\n for col in range(ncols):\n ax = fig.add_subplot(outer_grid[row,col])\n \n if(col == 2):\n eminV,emaxV = np.min(energiesEdsMap), np.max(energiesEdsMap)+relative_barrier\n surf = ax.imshow(energyMaps[col], cmap=cmap, origin='center', vmax=emaxV, vmin=eminV, extent=[minX, maxX, minY,maxY]) #interpolation=\"nearest\", \n else:\n surf = ax.imshow(energyMaps[col], cmap=cmap, interpolation=\"nearest\", origin='center', vmax=maxV, vmin=minV, extent=[minX, maxX, minY,maxY])\n if(plot_trajs): ax.scatter(tmp_visit_x, tmp_visit_y, c=traj_color, alpha=0.3, s=2) #plot trajs\n\n ax.set_ylim([minY, maxY])\n ax.set_xlim([minX, maxX])\n ax.tick_params(labelsize=14)\n\n #labelling fun\n if(row == 0):\n if(col == 0):\n ax.set_title(\"State 1\", fontsize=20)\n elif(col == 1):\n ax.set_title(\"State 2\", fontsize=20)\n else:\n ax.set_title(\"EDS state\", fontsize=20)\n if(col==0):\n ax.set_ylabel(y_label, fontsize=18)\n ax.set_yticks([-180,0,180])\n ax.text(x=-450,y=-0,s=\"s=\"+str(s), rotation=90, verticalalignment=\"center\", horizontalalignment=\"center\", fontsize=14)\n else:\n ax.set_yticks([])\n\n if(row == nrows-1):\n ax.set_xlabel(x_label, fontsize=18)\n ax.set_xticks([minX, maxX])\n ax.set_xticklabels([minX, 0, maxX], rotation=45)\n\n else:\n ax.set_xticks([])\n\n\n #colorbar\n cmap = matplotlib.cm.get_cmap(cmap)\n norm = matplotlib.colors.Normalize(vmin=minV, vmax=maxV)\n cbaxes = fig.add_axes([1.0, 0.1, 0.03, 0.8]) \n cb = matplotlib.colorbar.ColorbarBase(cbaxes, cmap=cmap,\n norm=norm,\n orientation='vertical',)\n cb.set_label(\"V/[kT]\")\n \n if(isinstance(out_path, type(None))):\n return fig\n else:\n fig.savefig(out_path, bbox_inches='tight')\n plt.close(fig)\n return out_path\n\n\n\n\nif __name__ == \"__main__\":\n pass","sub_path":"ensembler/visualisation/plotPotentials.py","file_name":"plotPotentials.py","file_ext":"py","file_size_in_byte":28532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"421123673","text":"# pack consecutive duplicates into lists\n\ndef pack_duplicates(lst):\n if len(lst) <= 1:\n return lst\n\n pre = lst[0]\n result = []\n count = 1\n for item in lst[1:]:\n if pre == item:\n 
count += 1\n continue\n\n result.append([pre for i in range(count)])\n pre = item\n count = 1\n\n result.append([pre for i in range(count)])\n\n return result\n\n\nif __name__ == \"__main__\":\n print(pack_duplicates([1, 1, 2, 3, 4, 6, 5, 2, 5, 2, 4, 2, 2]))\n","sub_path":"python/problems99/p9.py","file_name":"p9.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"222663525","text":"\nimport cv2\nimport numpy as np\nimport os\n \nfrom os.path import isfile, join\n \n\ndef detect_panels(frame):\n gauss = cv2.GaussianBlur(frame, (7, 7), 0)\n edged = cv2.Canny(gauss, 25, 70)\n\n # find contours in the edged image, keep only the largest\n # ones\n\n cnts = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n # findContours returns (image, contours, hierarchy) in OpenCV 3 but (contours, hierarchy) in OpenCV 4\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]\n\n screenCnt = []\n area = []\n e = 0\n # loop over our contours\n\n for (i, c) in enumerate(cnts):\n # approximate the contour\n epsilon = 0.1*cv2.arcLength(c,True)\n approx = cv2.approxPolyDP(c,epsilon,True)\n # if our approximated contour has four points, then\n # we can assume that we have found a panel\n if len(approx) == 4:\n screenCnt.append(approx)\n area.append(cv2.contourArea(approx))\n e+=1\n\n # screenCnt is already a list of contours, so pass it directly (wrapping it in another list raises an error)\n cv2.drawContours(frame, screenCnt, -1, (0, 255, 0), 3)\n cv2.imshow(\"Panel detection\", frame)\n cv2.waitKey(0)\n\n\n\ndef read_video(nameIn):\n cap = cv2.VideoCapture(nameIn)\n if (cap.isOpened()== False): \n print(\"Error opening video stream or file\")\n \n # Read until video is completed\n while(cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n \n # Display the resulting frame\n cv2.imshow('Frame',frame)\n detect_panels(frame)\n # Press Q on keyboard to exit\n if cv2.waitKey(15) & 0xFF == ord('q'):\n break\n \n # Break the loop\n else: \n break\n \n # When everything done, release the video capture object\n cap.release()\n \n # Closes all the frames\n cv2.destroyAllWindows()\n\n\n \ndef main():\n nameIn= '/home/victoria/code testing/python/video3.avi'\n pathOut = 'video3.avi'\n read_video(nameIn)\n\n\nif __name__==\"__main__\":\n main()","sub_path":"panel_det.py","file_name":"panel_det.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"33785440","text":"import sys\nimport time\nimport math\nimport numpy as np\n\n# Used to build our tree\nclass Node(object):\n def __init__(self, data, parent):\n self.data = data\n self.left = None\n self.right = None\n self.parent = parent\n self.split_attribute = None\n self.label = None\n self.label_counts = {}\n\n def is_left_child(self):\n return self is self.parent.left\n\n def is_right_child(self):\n return self is self.parent.right\n\n def __str__(self):\n return \"NODE \\n\" + \"Data: \" + str(self.data) + \"\\n\" + \"Split attribute: \" + str(self.split_attribute) + \"\\nExists Parent: \" + str(self.parent != None) + \\\n \"\\nExists Left Child: \" + str(self.left !=None) + \"\\nExists Right Child: \" + str(self.right != None) + \"\\nLabel (if leaf): \" + str(self.label)\n\n\n# Global parameters of model\ntrain_vocab = {}\nvocab = set()\nlabels = {}\nlabels_inverse = {}\nleaf_nodes = []\nnum_train_docs = 0\nnum_test_docs = 0\nmax_depth = int(sys.argv[3])\nmin_gain = float(sys.argv[4])\n\n\n# Build a vocabulary that stores all positions in alphabetical order and corresponding words\n# Position will be used 
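For reference, the consecutive-duplicate packing implemented by hand in p9.py above can also be expressed with itertools.groupby, which yields one group per run of equal adjacent items. This is a minimal equivalent sketch, not part of the original file:

from itertools import groupby

def pack_duplicates_groupby(lst):
    # groupby yields one (key, run) pair per run of equal consecutive items
    return [list(run) for _, run in groupby(lst)]

print(pack_duplicates_groupby([1, 1, 2, 3, 4, 6, 5, 2, 5, 2, 4, 2, 2]))
# [[1, 1], [2], [3], [4], [6], [5], [2], [5], [2], [4], [2, 2]]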
later for making vectors of documents\ndef build_training_vocab():\n\n global labels\n global train_vocab\n global num_train_docs\n global num_test_docs\n global train_line_labels\n global vocab\n \n vocab = set([])\n\n # Make the vocab a set\n with open(sys.argv[1]) as file:\n for line in file:\n s = line.split()\n if s[0] not in labels:\n labels[s[0]] = len(labels)\n labels_inverse[len(labels_inverse)] = s[0]\n for i in range(1, len(s)):\n vocab.add(s[i].split(\":\")[0])\n num_train_docs += 1\n\n sort = sorted(list(vocab))\n for i in range(0, len(sort)):\n if sort[i] not in train_vocab:\n train_vocab[sort[i]] = i\n vocab.add(sort[i])\n\n \n # As an extra step, get number of test docs\n with open(sys.argv[2]) as test:\n for line in test:\n num_test_docs += 1\n\n\n# Build document training vectors (vectors are dictionaries here)\ndef build_matrix(vocab, phase):\n \n global num_train_docs\n global num_test_docs\n\n num_docs = -1\n arg = -1\n if phase == \"train\":\n arg = 1\n num_docs = num_train_docs\n elif phase == \"test\":\n arg = 2\n num_docs = num_test_docs\n\n # Create a matrix (num_docs * num_features (+1 for class label))\n data = np.zeros((num_docs, len(train_vocab) + 1))\n\n # Make a vector for each document, add as row to matrix\n with open(sys.argv[arg]) as file:\n line_count = 0\n for line in file:\n # Add 1s where necessary and class label at end\n s = line.split()\n data[line_count][-1] = labels[s[0]]\n for i in range(1,len(s)):\n curr_word = s[i].split(\":\")[0]\n # If word in vocab (sometimes test words may not be in vocab)\n if curr_word in vocab:\n data[line_count][vocab[curr_word]] = 1\n line_count += 1\n \n return data\n\n\n# Get the entropy of some data\ndef entropy(data):\n\n # Get the counts of each label\n label_counts = np.unique(data, return_counts=True)[1]\n \n # Get probabilities of labels (how many of label x / total in column)\n label_probs = np.array([count / len(data) for count in label_counts])\n \n # Dot the probs (a column vector x) with log base 2 of the probs (this is entropy)\n # -(x1log2x1 + x2log2x2 + x3log2x3)....\n return -label_probs.dot(np.log2(label_probs))\n\n\n# Calculate info gain for an attribute given a set\ndef info_gain(attribute, data):\n index = train_vocab[attribute]\n\n x = data[:, index]\n y = data[:, -1]\n z = np.sum([x,y * 2], axis = 0)\n\n\n return entropy(x) + entropy(y) - entropy(z)\n\n\n# Given a set (data at a node), find which attribute to split on and split\ndef split_data(node):\n # print(\"called find best split - \" + \"num docs: \" + str(len(node.data)))\n max_info_gain = 0\n best = None\n\n # Test each attribute (word) i (len(training_data[0]) - 1 = |V|)\n t0 = time.time()\n for attribute in vocab:\n curr_info_gain = info_gain(attribute, node.data)\n if curr_info_gain > max_info_gain:\n max_info_gain = curr_info_gain\n best = attribute\n\n if max_info_gain < min_gain:\n return None\n \n # print(\"time to find best attribute: \" + str(time.time() - t0))\n\n # Remove attribute\n if best != None:\n vocab.remove(best)\n\n return best\n\n\n# Set the label of leaf nodes after built tree\ndef set_leaf_labels():\n for leaf in leaf_nodes:\n # Get counts of labels at leaf\n label_counts = {}\n if len(leaf.data) != 0:\n for i in range(0, len(leaf.data)):\n if leaf.data[i][-1] in label_counts:\n label_counts[leaf.data[i][-1]] += 1\n else:\n label_counts[leaf.data[i][-1]] = 1\n \n # Make sure to include counts for labels with 0 docs\n for label_num in labels_inverse:\n if label_num not in label_counts:\n label_counts[label_num] = 
0\n\n # Set the label for the leaf node\n leaf_label = None\n max_count = -1\n for key in label_counts:\n if label_counts[key] > max_count:\n leaf_label = key\n max_count = label_counts[key]\n \n \n\n leaf.label = labels_inverse[leaf_label]\n leaf.label_counts = label_counts\n\n\n# Build a decision tree\ndef build_tree(curr_node, max_depth, min_gain, curr_depth):\n\n # print(\"depth: \" + str(curr_depth))\n\n global leaf_nodes\n\n if curr_depth == max_depth or len(curr_node.data) == 1:\n leaf_nodes.append(curr_node)\n return\n \n # Find best split, if none then stop\n best_attribute = split_data(curr_node)\n if best_attribute == None:\n leaf_nodes.append(curr_node)\n return\n index = train_vocab[best_attribute]\n\n\n # Build left and right children\n curr_node.split_attribute = best_attribute\n left = curr_node.data[curr_node.data[:, index] == 1]\n right = curr_node.data[curr_node.data[:, index] == 0]\n\n\n # Construct children\n if len(left) != 0:\n curr_node.left = Node(left, curr_node)\n build_tree(curr_node.left, max_depth, min_gain, curr_depth + 1)\n if len(right) != 0:\n curr_node.right = Node(right, curr_node)\n build_tree(curr_node.right, max_depth, min_gain, curr_depth + 1)\n\n # Clear the non-leaf nodes of data (to avoid duplication)\n curr_node.data = None\n\n # Non leaf nodes\n return curr_node\n\n\n# Clear out the leaf nodes of the tree (clear all data, but keep labels learned from training)\ndef clear_tree_data():\n global leaf_nodes\n for leaf in leaf_nodes:\n leaf.data = None\n\n\n# Output training stats to files\ndef output_training():\n \n # Clear previous files\n open(sys.argv[5], \"w\").close()\n open(sys.argv[6], \"w\").close()\n\n \n\n # Write to model file\n with open(sys.argv[5], \"a\") as model_file, open(sys.argv[6], \"a\") as sys_file:\n \n # Headers\n sys_file.write(\"%%%%% training data:\\n\")\n print(\"Confusion matrix for the training data:\")\n print(\"row is the truth, column is the system output\")\n print(\"\\t\\t\\t\", end = \"\")\n for label in sorted(labels):\n print(label + \" \", end = \"\")\n print()\n\n confusion = {}\n doc_counter = 0\n \n # Go through all leaf nodes\n for leaf in leaf_nodes:\n curr = leaf\n\n # Write path from leaf to root in model file (based on features split)\n while curr.parent != None:\n # If this is the left child\n if curr.is_left_child():\n model_file.write(str(curr.parent.split_attribute))\n # If this is the right child\n elif curr.is_right_child():\n model_file.write(\"!\" + str(curr.parent.split_attribute))\n # Update curr\n curr = curr.parent\n if curr.parent != None:\n model_file.write(\"&\")\n\n # Write number of examples at leaf node and distribution at leaf\n model_file.write(\" \" + str(len(leaf.data)) + \" \")\n for label in leaf.label_counts:\n model_file.write(str(labels_inverse[label]) + \" \" + str(leaf.label_counts[label] / len(leaf.data)) + \" \")\n\n # Write to sys file\n for doc in leaf.data:\n sys_file.write(\"array:\" + str(doc_counter) + \" \")\n doc_counter += 1\n for label in leaf.label_counts:\n sys_file.write(str(labels_inverse[label]) + \" \" + str(leaf.label_counts[label] / len(leaf.data)) + \" \")\n sys_file.write(\"\\n\")\n \n\n # Get confusion data\n for i in range(0, len(leaf.data)):\n my_label = leaf.label\n system_label = labels_inverse[leaf.data[i][-1]]\n\n if (my_label, system_label) in confusion:\n confusion[(my_label, system_label)] += 1\n else:\n confusion[(my_label, system_label)] = 1\n \n # New line for each leaf\n model_file.write(\"\\n\")\n\n \n # Fill in rest = 0 for confusion 
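The info_gain routine in dt.py above rests on the identity I(X;Y) = H(X) + H(Y) - H(X,Y), where the joint distribution of a binary feature x and an integer label y is encoded as z = x + 2y so that each (x, y) pair gets a unique code. A standalone numpy check of that identity, with illustrative data and names:

import numpy as np

def entropy(column):
    # H = -sum p * log2(p) over the empirical distribution of values
    _, counts = np.unique(column, return_counts=True)
    probs = counts / len(column)
    return -probs.dot(np.log2(probs))

x = np.array([0, 0, 1, 1, 1, 0])  # binary feature
y = np.array([0, 1, 1, 1, 0, 0])  # label
z = x + 2 * y                     # unique code per (x, y) pair -> joint entropy
gain = entropy(x) + entropy(y) - entropy(z)
print(round(gain, 4))             # mutual information of x and y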
matrix\n for label_x in labels:\n for label_y in labels:\n if (label_x, label_y) not in confusion:\n confusion[(label_x, label_y)] = 0\n \n \n # Print confusion matrix\n accurate_count = 0\n total_count = 0\n for label_x in sorted(labels):\n print(label_x, end = \"\")\n for label_y in sorted(labels):\n if label_x == label_y:\n accurate_count += confusion[(label_x, label_y)]\n total_count += confusion[(label_x, label_y)]\n print(\"\\t\" + str(confusion[(label_x, label_y)]), end = \"\")\n print()\n \n print(\"\\nTraining accuracy: \" + str(accurate_count / total_count))\n \n\n# Output testing stats\ndef output_testing():\n with open(sys.argv[6], \"a\") as sys_file:\n\n # Header\n sys_file.write(\"\\n\\n %%%%% test data:\\n\")\n print(\"\\n\\nConfusion matrix for the training data:\")\n print(\"row is the truth, column is the system output\")\n print(\"\\t\\t\\t\", end = \"\")\n for label in sorted(labels):\n print(label + \" \", end = \"\")\n print()\n\n confusion = {}\n doc_counter = 0\n\n for leaf in leaf_nodes:\n # Get confusion data\n if leaf.data != None:\n for i in range(0, len(leaf.data)):\n my_label = leaf.label\n system_label = labels_inverse[leaf.data[i][-1]]\n\n if (my_label, system_label) in confusion:\n confusion[(my_label, system_label)] += 1\n else:\n confusion[(my_label, system_label)] = 1\n\n # Write to sys file\n if leaf.data != None:\n for doc in leaf.data:\n sys_file.write(\"array:\" + str(doc_counter) + \" \")\n doc_counter += 1\n for label in leaf.label_counts:\n sys_file.write(str(labels_inverse[label]) + \" \" + str(leaf.label_counts[label] / len(leaf.data)) + \" \")\n sys_file.write(\"\\n\")\n \n \n # Fill in rest = 0 for confusion matrix\n for label_x in labels:\n for label_y in labels:\n if (label_x, label_y) not in confusion:\n confusion[(label_x, label_y)] = 0\n \n\n # Print confusion matrix\n accurate_count = 0\n total_count = 0\n for label_x in sorted(labels):\n print(label_x, end = \"\")\n for label_y in sorted(labels):\n if label_x == label_y:\n accurate_count += confusion[(label_x, label_y)]\n total_count += confusion[(label_x, label_y)]\n print(\"\\t\" + str(confusion[(label_x, label_y)]), end = \"\")\n print()\n \n print(\"\\nTesting accuracy: \" + str(accurate_count / total_count))\n\n\n# Given test data, classify it using the tree which has been built\ndef classify(documents, root):\n \n\n # For each test vector (document)\n for document in documents:\n curr_node = root\n while curr_node.split_attribute != None:\n split_on = curr_node.split_attribute\n if document[train_vocab[split_on]] == 1:\n curr_node = curr_node.left\n else:\n curr_node = curr_node.right\n\n if curr_node.data == None:\n curr_node.data = [document]\n else:\n curr_node.data.append(document)\n\n if document[-1] in curr_node.label_counts:\n curr_node.label_counts[document[-1]] += 1\n else:\n curr_node.label_counts[document[-1]] = 1\n\n\n# Time stats\nt0 = time.time()\n\n# Training\nbuild_training_vocab()\nroot = Node(build_matrix(train_vocab, \"train\"), None)\n\n# ####\n# data = root.data\n# ti = time.time()\n# entropy(data[:, 0])\n# print(\"entropy of col 0: \" + str(time.time() - ti))\n# ####\n\n\nbuild_tree(root, max_depth, min_gain, 0)\nset_leaf_labels()\noutput_training()\n\n# Testing\nclear_tree_data()\ntest_documents = build_matrix(train_vocab, \"test\")\nclassify(test_documents, root)\noutput_testing()\n\n\n# Time stats\nt1 = time.time()\ntotal = (t1-t0) / 60\nprint(\"\\n\\nruntime: \" + 
str(total))","sub_path":"LING572/hw2/dt.py","file_name":"dt.py","file_ext":"py","file_size_in_byte":13699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"178988365","text":"\"\"\"\r\nCreated on Mon Sep 24 16:40:52 2018\r\n\r\n@author: Natna\r\n\"\"\"\r\nimport os\r\n\r\nimport argparse\r\nimport sys\r\nfrom datetime import datetime \r\n#os.chdir('C:/Users/Natna/Downloads/enron_mail_20150507.tar/maildir/')\r\n\r\nx=0\r\ny=0\r\ni=0\r\nk=0\r\nlarg_pair=[]\r\ntemp=[]\r\nmonth={1:'Jan',2:'Feb',3:'Mar',4:'Apr',5:'May',6:'Jun',7:'Jul',8:'Aug',9:'Sep',10:'Oct',11:'Nov',12:'Dec'}\r\nt=3\r\n\r\npairs=[]\r\nap = argparse.ArgumentParser(description='directory to the email folder')\r\nap.add_argument('--input', help='Directroy of input files', default='C:/Users/Natna/Downloads/enron_mail_20150507.tar/maildir/', required = False)\r\nap.add_argument('--output', help='Directroy of output files', default='C:/Users/Natna/Documents', required = False)\r\nap.add_argument('--startdate', help='start date of range', default='1 1 1997', required = False)\r\nap.add_argument('--enddate', help='end date of range', default='31 1 2001', required = False)\r\nargs = vars(ap.parse_args())\r\nsplit_start_date=args['startdate'].split()\r\nsplit_end_date=args['enddate'].split()\r\njoined_start_date=', '.join(split_start_date)\r\njoined_end_date=', '.join(split_end_date)\r\nstart_date=datetime.strptime( joined_start_date,'%d, %m, %Y')\r\nend_date=datetime.strptime( joined_end_date,'%d, %m, %Y')\r\ndates=[]\r\n#file=directory\r\n\r\nif(not(os.path.isdir(args['input']))):\r\n print()\r\n print(\"Inputting data from: \"+args['input'])\r\n print(\"Invalid input directory. Please specifiy correct input path to email dataset\")\r\n print()\r\n ap.print_help()\r\n exit()\r\nif(not(os.path.isdir(args['output']))):\r\n print()\r\n print(\"Outputting result to directory: \"+args['output'])\r\n print(\"Invalid output directory. 
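The date filtering in temp2.py builds datetime objects from "day, month, year" strings and keeps only emails inside the requested range. A condensed, self-contained sketch of that pattern (dates and helper name are illustrative):

from datetime import datetime

start_date = datetime.strptime("1, 1, 1997", "%d, %m, %Y")
end_date = datetime.strptime("31, 1, 2001", "%d, %m, %Y")

def in_range(day, month_num, year):
    # same format string the script uses for its joined date fields
    email_date = datetime.strptime(f"{day}, {month_num}, {year}", "%d, %m, %Y")
    return start_date <= email_date <= end_date

print(in_range(14, 3, 1999), in_range(2, 2, 2005))  # True False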
Please specifiy correct output path for result set\")\r\n print()\r\n ap.print_help()\r\n exit()\r\nfile=os.listdir(args['input'])\r\n\r\nfrom collections import defaultdict\r\nsendDict = defaultdict(set)\r\n\r\nnumFiles = 0\r\nnumProcessed = 0\r\n\r\nfor line in file:\r\n numFiles += 1 \r\n print(\"checking folder #\", numFiles, ': ', line, sep = '')\r\n\r\n sent_mail = args['input']+'/'+line+'/_sent_mail'\r\n \r\n if (not os.path.isdir(sent_mail)): \r\n print(\"no sent mail for:\", line)\r\n \r\n\r\n if (os.path.isdir(sent_mail)):\r\n numProcessed += 1\r\n sent=os.listdir(sent_mail)\r\n for email in sent:\r\n pairs=[]\r\n if(os.path.isfile(sent_mail +\"/\"+email)):\r\n with open(os.path.join(sent_mail,email),'r') as f:\r\n \r\n f_contents=f.readlines()\r\n \r\n \r\n blocks=f_contents[1].split()\r\n for thing in blocks:\r\n for i in range(1,13):\r\n \r\n if thing == month[i]:\r\n \r\n blocks[3]=str(i)\r\n temp_date=', '.join(blocks[2:5])\r\n email_date=datetime.strptime(temp_date ,'%d, %m, %Y')\r\n \r\n \r\n break \r\n \r\n if(start_date<=email_date and email_date<=end_date ):\r\n \r\n \r\n \r\n r=3\r\n while(not(\"Subject:\" in f_contents[r])):\r\n \r\n \r\n \r\n f_contents[r].strip('\\t')\r\n \r\n \r\n temp=f_contents[r].split(',')\r\n \r\n k=0\r\n for employee in temp:\r\n employee = employee.strip()\r\n if(employee != ''):\r\n \r\n f_contents[2]=f_contents[2].strip('\\n')\r\n f_contents[2]=f_contents[2].replace('From: ','')\r\n pairs.append(f_contents[2])\r\n sendDict[line].add(f_contents[2])\r\n\r\n employee=employee.strip()\r\n employee=employee.replace('To: ','')\r\n pairs.append(employee)\r\n pairs.append(email)\r\n larg_pair.append(pairs)\r\n \r\n pairs=[]\r\n \r\n \r\n \r\n r=r+1\r\n \r\n \r\nprint()\r\nprint(\"Number of folders searched: \", numFiles)\r\nprint(\"Number of folders processed: \", numProcessed)\r\nprint(\"Number of folders skipped: \", numFiles - numProcessed)\r\nprint()\r\n\r\nprint(\"outputting edge list to:\", args['output'] + '/edgelist.csv')\r\nwith open(args['output']+'/'+'edgelist.csv','w+') as list:\r\n for item in larg_pair:\r\n edge_list=', '.join(item)\r\n list.write(edge_list+'\\n')\r\n\r\nfor senders in sendDict :\r\n print(senders, \":\" , sendDict[senders])\r\n\r\n","sub_path":"temp2.py","file_name":"temp2.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"191753535","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tikzplotlib import save\n\nimport gen_pod_uq.mc_pce_utils as mpu\n\nfrom kmg_mtrx_helpers import get_evay\n\n\ndef comp_cdf(ysoltens, nua=0., nub=1., dst='beta-2-5', nsample=1000000,\n pcedim=5):\n \n abscissae, _, _, _ = mpu.\\\n setup_pce(distribution=dst,\n distrpars=dict(a=nua, b=nub),\n pcedim=pcedim, uncdims=4)\n \n evay = get_evay(ysoltens, abscissae)\n \n getsample = mpu.get_nu_sample(distribution=dst,\n uncdims=4, nulb=nua, nuub=nub)\n\n rndsa = getsample(nsample)\n smpllist = []\n for csmpl in rndsa:\n smpllist.append(evay(csmpl.flatten()))\n \n cpfvals = mpu.empirical_cdf(smpllist)\n srtdsmpllist = sorted(smpllist)\n\n return srtdsmpllist, cpfvals\n\n\ndef compmaxdiff(xl, cdfxl, tx, tcdfx, intpoints=2000):\n smin, smax = tx[0], tx[-1]\n for x in xl:\n smin = max(smin, x[0])\n smax = min(smax, x[-1])\n intx = np.linspace(smin, smax, intpoints)\n itcdf = np.interp(x=intx, xp=tx, fp=tcdfx)\n\n diffl, maxl = [], []\n for kkk, cdfx in enumerate(cdfxl):\n icdf = np.interp(x=intx, xp=xl[kkk], fp=cdfx)\n dficdf = 
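Together, comp_cdf and compmaxdiff estimate a Kolmogorov-style sup-distance between empirical CDFs. Since mpu.empirical_cdf is project-specific, here is a numpy-only stand-in for the same computation, a sketch rather than the project's code:

import numpy as np

def empirical_cdf(samples):
    # at the i-th order statistic the empirical CDF is (i + 1) / n
    xs = np.sort(np.asarray(samples, dtype=float))
    return xs, np.arange(1, len(xs) + 1) / len(xs)

def kolmogorov_distance(a, b, intpoints=2000):
    # interpolate both CDFs onto the overlap of their supports, take the sup norm
    xa, ca = empirical_cdf(a)
    xb, cb = empirical_cdf(b)
    grid = np.linspace(max(xa[0], xb[0]), min(xa[-1], xb[-1]), intpoints)
    return np.max(np.abs(np.interp(grid, xa, ca) - np.interp(grid, xb, cb)))

rng = np.random.default_rng(0)
print(kolmogorov_distance(rng.normal(size=5000), rng.normal(0.1, 1.0, 5000)))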
icdf-itcdf\n diffl.append([intx, dficdf])\n maxl.append(np.max(np.abs(dficdf)))\n\n return np.median(np.array(maxl)), maxl, diffl\n\n\nif __name__ == '__main__':\n\n smpls = 10 # number of samples for the MC/wRB bases\n runs = 5 # how many runs --- since the sampling is also stochastic\n nua, nub = 5e-4, 10e-4\n smplsforcdf = int(1e6)\n\n onlyplots = True\n onlyplots = False\n\n if onlyplots:\n runs = 1\n smpls = 5 # number of samples for the MC/wRB bases\n pltsmpls = smpls\n else:\n pltsmpls = int(smpls/2)\n pltfilter = 4 # only plot every 4-th data point\n\n dstl = ['beta-2-5', 'uniform']\n for pltadd, dst in enumerate(dstl):\n\n Nndstr = f'N12nu{nua:.2e}--{nub:.2e}' + dst\n dataprfx = 'cached-data/' + Nndstr\n\n ccdfdct = dict(nua=nua, nub=nub, pcedim=5, dst=dst, nsample=smplsforcdf)\n\n for rrr in range(runs):\n yts = dataprfx + '_pce5_ysoltns.npy'\n ysoltens = np.load(yts)\n xtrth, cdfxtrth = comp_cdf(ysoltens, **ccdfdct)\n # jmin, jmax = xtrth[0], xtrth[-1]\n\n if onlyplots:\n pass\n else:\n yts = dataprfx + '_pce2_ysoltns.npy'\n ysoltens = np.load(yts)\n xpcetwo, cdfpcetwo = comp_cdf(ysoltens, pcedim=2, dst=dst,\n nua=nua, nub=nub, nsample=smplsforcdf)\n ppkmmed, _, ppxc \\\n = compmaxdiff([xpcetwo], [cdfpcetwo], xtrth, cdfxtrth)\n print(f'Kolmometer: {dst}: pce[2]: {ppkmmed:.5f}')\n\n yts = dataprfx + '_pce5_pod8_bfpce2_run1of1_ysoltns.npy'\n ysoltens = np.load(yts)\n xpodpcef, cdfpodpcef = comp_cdf(ysoltens, **ccdfdct)\n # jmin, jmax = max(jmin, xpodpcef[0]), min(jmax, xpodpcef[-1])\n ppkmmed, _, ppxc \\\n = compmaxdiff([xpodpcef], [cdfpodpcef], xtrth, cdfxtrth)\n print(f'Kolmometer: {dst}: pce-16: {ppkmmed:.5f}')\n\n # accytns = 0\n xrbl, rbcdfl = [], []\n for kkk in range(smpls):\n cyts = np.load(dataprfx + '_pce5_pod8_bfrb_random16_runs10' + \\\n f'_run{kkk+1}of10_ysoltns.npy')\n xrb, cdfrbx = comp_cdf(cyts, **ccdfdct)\n xrbl.append(xrb)\n rbcdfl.append(cdfrbx)\n # accytns += cyts\n\n rbkmmed, rbkmerrs, rbxc = compmaxdiff(xrbl, rbcdfl, xtrth, cdfxtrth)\n print(f'Kolmometer: {dst}: rb16: {rbkmmed:.5f} -- median out of {smpls}')\n\n if onlyplots:\n pass\n else:\n xrblt, rbcdflt = [], []\n for kkk in range(smpls):\n cyts = np.load(dataprfx + '_pce5_pod8_bfrb_random32_runs10' + \\\n f'_run{kkk+1}of10_ysoltns.npy')\n xrbt, cdfrbxt = comp_cdf(cyts, **ccdfdct)\n xrblt.append(xrbt)\n rbcdflt.append(cdfrbxt)\n # accytns += cyts\n\n rbkmmedt, _, _ = compmaxdiff(xrblt, rbcdflt, xtrth, cdfxtrth)\n print(f'Kolmometer: {dst}: rb32: {rbkmmedt:.5f} -- median out of {smpls}')\n\n xmcl, mccdfl = [], []\n for kkk in range(smpls):\n cyts = np.load(dataprfx + '_pce5_pod8_bfmc16_runs10' + \\\n f'_run{kkk+1}of10_ysoltns.npy')\n xmc, cdfmcx = comp_cdf(cyts, **ccdfdct)\n xmcl.append(xmc)\n mccdfl.append(cdfmcx)\n\n mckmmed, mckmerrs, mcxc = compmaxdiff(xmcl, mccdfl, xtrth, cdfxtrth)\n print(f'Kolmometer: {dst}: mc16: {mckmmed:.5f} -- median out of {smpls}')\n\n if onlyplots:\n pass\n else:\n xmclt, mccdflt = [], []\n for kkk in range(smpls):\n cyts = np.load(dataprfx + '_pce5_pod8_bfmc32_runs10' + \\\n f'_run{kkk+1}of10_ysoltns.npy')\n xmct, cdfmcxt = comp_cdf(cyts, **ccdfdct)\n xmclt.append(xmct)\n mccdflt.append(cdfmcxt)\n mckmmedt, _, _ = compmaxdiff(xmclt, mccdflt, xtrth, cdfxtrth)\n print(f'Kolmometer: {dst}: mc32: {mckmmed:.5f} -- median out of {smpls}')\n\n plt.figure(330+pltadd)\n clrs = []\n pltsmpls = int(smpls/2)\n for kkk in range(pltsmpls+1): # +1 for the legend dummy plot\n clrs.extend([.6])\n clrs.extend([.3])\n clrs.extend([.9])\n plt.rcParams[\"axes.prop_cycle\"] = 
plt.cycler(\"color\", plt.cm.plasma(clrs))\n\n for kkk in range(pltsmpls):\n plt.plot(rbxc[kkk][0][::pltfilter], rbxc[kkk][1][::pltfilter], alpha=.4)\n plt.plot(mcxc[kkk][0][::pltfilter], mcxc[kkk][1][::pltfilter], alpha=.4)\n\n # one dummy point plot to have the labels in full color\n plt.plot(0.65, 0., label='RB16')\n plt.plot(0.65, 0., label='MC16')\n\n plt.plot(ppxc[0][0][::pltfilter], ppxc[0][1][::pltfilter], label='pcePOD16')\n\n plt.title(dst)\n plt.legend()\n save('kolmomotor'+dst+'.tikz')\n\n plt.show()\n","sub_path":"scripts/kolmogorov-metrix.py","file_name":"kolmogorov-metrix.py","file_ext":"py","file_size_in_byte":6417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"585899758","text":"from unittest import TestCase\n\nfrom ddt import ddt, data, unpack\nfrom mock import MagicMock\n\nfrom Adafruit_LSM303.instruments import Instrument, Inclinometer\n\n\n@ddt\nclass TestInstrument(TestCase):\n def setUp(self):\n lsm303 = MagicMock()\n self.instrument = Instrument(lsm303)\n\n @data(\n (0, 0, 0),\n (1, 0, 0),\n (1, 1, 45),\n (0, 1, 90),\n (-1, 0, 180),\n (-1, -1, 225),\n (0, -1, 270),\n (1, -0.0000000000000001, 360),\n )\n @unpack\n def test_vector_2_degrees(self, x, y, expected_degrees):\n degrees = self.instrument.vector_2_degrees(x, y)\n self.assertEqual(expected_degrees, degrees)\n","sub_path":"test/test_instrument.py","file_name":"test_instrument.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"526600409","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/operators/postgres_operator.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 2696 bytes\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass PostgresOperator(BaseOperator):\n __doc__ = \"\\n Executes sql code in a specific Postgres database\\n\\n :param sql: the sql code to be executed. 
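The parametrized test above pins down the expected behaviour of vector_2_degrees: (1, 0) maps to 0, (1, 1) to 45, (0, 1) to 90, and so on around to 360. One implementation consistent with those expected values is sketched below; the actual Adafruit_LSM303 code may differ:

from math import atan2, degrees

def vector_2_degrees(x, y):
    # atan2 returns (-180, 180]; lift negative angles into [0, 360)
    angle = degrees(atan2(y, x))
    return angle + 360 if angle < 0 else angle

for x, y in [(1, 0), (1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1)]:
    print((x, y), vector_2_degrees(x, y))  # 0, 45, 90, 180, 225, 270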
(templated)\\n :type sql: Can receive a str representing a sql statement,\\n a list of str (sql statements), or reference to a template file.\\n Template reference are recognized by str ending in '.sql'\\n :param postgres_conn_id: reference to a specific postgres database\\n :type postgres_conn_id: str\\n :param autocommit: if True, each command is automatically committed.\\n (default value: False)\\n :type autocommit: bool\\n :param parameters: (optional) the parameters to render the SQL query with.\\n :type parameters: mapping or iterable\\n :param database: name of database which overwrite defined one in connection\\n :type database: str\\n \"\n template_fields = ('sql', )\n template_ext = ('.sql', )\n ui_color = '#ededed'\n\n @apply_defaults\n def __init__(self, sql, postgres_conn_id='postgres_default', autocommit=False, parameters=None, database=None, *args, **kwargs):\n (super(PostgresOperator, self).__init__)(*args, **kwargs)\n self.sql = sql\n self.postgres_conn_id = postgres_conn_id\n self.autocommit = autocommit\n self.parameters = parameters\n self.database = database\n\n def execute(self, context):\n self.log.info('Executing: %s', self.sql)\n self.hook = PostgresHook(postgres_conn_id=(self.postgres_conn_id), schema=(self.database))\n self.hook.run((self.sql), (self.autocommit), parameters=(self.parameters))\n for output in self.hook.conn.notices:\n self.log.info(output)","sub_path":"pycfiles/apache_airflow_arup-1.10.5-py3.6/postgres_operator.cpython-36.py","file_name":"postgres_operator.cpython-36.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"352785669","text":"import numpy as np\nimport random as rm\n\nfrom cellauto.grids.life import LifeGrid\n\nclass ProbLifeGrid(LifeGrid):\n\n def __init__(self, y_size, x_size, survive_list=[2,3], birth_list=[3], probability=0.1):\n LifeGrid.__init__(self, y_size, x_size, survive_list, birth_list)\n self.probability = probability\n\n def evolve(self):\n new_grid = np.copy(self.grid)\n for i, row in enumerate(self.grid):\n for j, item in enumerate(row):\n neighbours = self.count_neighbours(i, j)\n if item == 1:\n if neighbours not in self.survive_list:\n new_grid[i][j] = 0\n else:\n if neighbours in self.birth_list:\n new_grid[i][j] = 1\n if rm.random() < self.probability:\n new_grid[i][j] = (new_grid[i][j] + 1) % 2\n\n self.grid = np.copy(new_grid)\n","sub_path":"cellauto/grids/probable_life.py","file_name":"probable_life.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"214803264","text":"#!/user/bin/env python3\n# -*- coding: utf8 -*-\n\n#===================================================#\n# cleanup.py #\n# Joshua Westgard # \n# 2015-07-16 #\n# #\n# Data preprocessing script for md-newspapers DB #\n# Usage: python3 cleanup.py [in.csv] [out.csv] #\n#===================================================#\n\nimport sys, csv, datetime\n\ninfields = ['state', 'city', 'county', 'title', 'year_pub_start', 'year_pub_end', \n'lccn', 'oclc', 'issn', 'owner_producer', 'url', 'subscription_req', 'image_type', \n'full_text_search', 'date_avail_start', 'date_avail_end', 'issues', 'comments']\n\noutfields = ['id'] + infields + ['range_avail','range_pub','year_facets_list']\n\nwith open(sys.argv[1], 'r') as infile, open(sys.argv[2], 'w') as outfile:\n \n # skip header row in order to use own fieldnames\n next(infile)\n \n # instantiate the reader and 
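The evolve method in ProbLifeGrid visits every cell in a Python loop; an equivalent vectorized step using a neighbour-count convolution is sketched below. The scipy dependency and the wrap-around boundary are assumptions here, and the project's count_neighbours may treat grid edges differently:

import numpy as np
from scipy.signal import convolve2d

KERNEL = np.array([[1, 1, 1],
                   [1, 0, 1],
                   [1, 1, 1]])

def life_step(grid, survive=(2, 3), birth=(3,), probability=0.0, rng=None):
    # one convolution counts all 8 neighbours of every cell at once
    neighbours = convolve2d(grid, KERNEL, mode="same", boundary="wrap")
    new = np.where(grid == 1,
                   np.isin(neighbours, survive),
                   np.isin(neighbours, birth)).astype(int)
    if probability > 0:  # probabilistic variant: randomly flip cells, as above
        rng = rng or np.random.default_rng()
        new ^= (rng.random(grid.shape) < probability).astype(int)
    return new

glider = np.zeros((6, 6), dtype=int)
glider[1, 2] = glider[2, 3] = glider[3, 1] = glider[3, 2] = glider[3, 3] = 1
print(life_step(glider))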
writer objects\n dr = csv.DictReader(infile, fieldnames=infields)\n dw = csv.DictWriter(outfile, fieldnames=outfields)\n dw.writeheader()\n \n # loop over the input file, writing results to output file\n for n, row in enumerate(dr):\n \n # create id column\n row['id'] = n + 1\n \n # strip out commas from issue data (should be integer data)\n row['issues'] = row['issues'].replace(',', '')\n \n # split format column on commas and save as space-delimited field\n types = row['image_type'].split(',')\n row['image_type'] = \" \".join([t.strip(' ') for t in types])\n \n # get the years from the end of the available date strings\n avail_start = row['date_avail_start'].split('/')[-1]\n avail_end = row['date_avail_end'].split('/')[-1]\n \n # handle the end date of \"current\"\n if avail_end == \"current\":\n avail_end = \"2015\"\n \n # create display ranges for dates available and dates published\n row['range_avail'] = '{0}-{1}'.format(avail_start, avail_end)\n row['range_pub'] = '{0}-{1}'.format(row['year_pub_start'], row['year_pub_end'])\n \n # generate a field holding the range of years between the begin and end years\n yrs = range(int(avail_start), int(avail_end) + 1)\n row['year_facets_list'] = \" \".join([str(x) for x in yrs])\n \n dw.writerow(row)\n \n\n \n \n \n","sub_path":"transform/md-news.py","file_name":"md-news.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"7675652","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 16 22:20:57 2016\n@usage : Camera Steam I/O Class,use deque and multithreading to increase fetch.\n Frame Storage in limited length container.\n\n\n@author: pip\n\"\"\"\nimport cv2\nimport threading\nimport time\nfrom pylab import *\nimport random\nimport os\nfrom collections import deque\nfrom window.previewWindowManager import PreviewWindowManager\nimport logging\nlogging.basicConfig(level=logging.DEBUG)\n\nclass IOSteam(object):\n \"\"\"\n Implements of frame fetch. 
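md-news.py follows the usual DictReader -> transform -> DictWriter pipeline. A self-contained miniature of the same pattern, with in-memory data and illustrative field names:

import csv, io

src = io.StringIO('title,issues\nThe Sun,"1,234"\n')
out = io.StringIO()
reader = csv.DictReader(src)
writer = csv.DictWriter(out, fieldnames=["id", "title", "issues"])
writer.writeheader()
for n, row in enumerate(reader):
    row["id"] = n + 1                               # add an id column
    row["issues"] = row["issues"].replace(",", "")  # "1,234" -> "1234"
    writer.writerow(row)
print(out.getvalue())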
\n \"\"\"\n def __init__(self,channel=0,queue_len=30,subthreadName='cameraIO'):\n self._subthreadName=subthreadName \n self._channel=channel\n self._queue_len=queue_len\n self._cameraCapture=cv2.VideoCapture(self._channel)\n self._threading_obj=threading.Thread(target=self._update_frame_continues\n ,name=self._subthreadName#,args=(self)\n )\n #self._latest_frame_signal=threading.Semaphore(n=0)\n self._latest_frame_signal=threading.Event()\n #last frame\n self._frame=None\n self._frame_count=0\n self._dumped_Frame_count=0\n self._jump_init_camera_frame=10\n self._keep_fetch=False\n self._rate=0\n self.framequeue=deque([],self._queue_len)\n \n @property\n def channel(self):\n return self._channel\n\n @channel.setter\n def channel(self, value):\n if self._channel != value:\n self._channel = value\n self._frame = None\n @property\n def frame(self):\n # if(len(self.framequeue)>0):\n # #return last appended frame data\n # return self.framequeue[-1]\n # else:\n # return None\n if self._keep_fetch==False or self._threading_obj.isAlive()==False:\n logging.debug('[-]before Fetch frame,use run() method') \n return None\n else:\n self._latest_frame_signal.wait()\n self._latest_frame_signal.clear()#reset signal until another threading SET.\n #wait for synchronous signal arrived.\n return self._frame\n @property \n def rate(self):\n return self._rate\n def _update_frame(self):\n \"\"\"\n update 1 frame in one manipultion.\n \"\"\"\n flag=self._cameraCapture.grab()\n if flag:\n \n _,self._frame=self._cameraCapture.retrieve()\n #increase signal count;\n self._frame_count+=1\n if self._frame_count>self._jump_init_camera_frame:\n self._latest_frame_signal.set()\n self.framequeue.append(self._frame)\n def _update_frame_continues(self):\n while self._keep_fetch:\n self._update_frame()\n \n \n def dumpFrame(self,direct='./',filename=None ,ext='.png'):\n randomstr=str(random.random())[2:7]\n sp='_'\n if filename is None:\n filename=str(int( time.time() ))\n\n dir_filename=direct+filename+sp+str(self._dumped_Frame_count)+sp+randomstr+ext\n if self._keep_fetch and self._threading_obj.isAlive():\n safeFrame=self.framequeue[-1]\n \n if cv2.imwrite(dir_filename,safeFrame):\n self._dumped_Frame_count+=1\n logging.debug('[+]DumpFrame Fetched and Saved {}'.format(filename))\n else:\n logging.debug('[-]DumpFrame Fetched but Not Saved')\n else:\n logging.debug('[-]threading Not running,No dump')\n \n \n \n def control_thread_obj(self,keepRun=True):\n #pass\n if keepRun:\n #consult threading is running?\n if not self._threading_obj.isAlive():\n self._keep_fetch=True\n self._threading_obj.start()\n logging.info('threading keep fetch RUN')\n else:\n logging.info('threading already in RUN')\n else:\n if self._threading_obj.isAlive():\n #self._threading_obj.start()\n self._keep_fetch=False\n logging.info('threading keep fetch will stop')\n time.sleep(0.5)\n logging.debug('interal Thread obj status :%s'%str(\n self._threading_obj.isAlive() ))\n def closecamera(self):\n self._cameraCapture.release()\n logging.info('Channel %d released'%self._channel)\n #up to now ,dont now how to do.\n def run(self):\n #self._cameraCapture=cv2.VideoCapture(self._channel)\n self.control_thread_obj(True)\n def stop(self):\n self.control_thread_obj(False)\n self.closecamera()\n \n \n\ndef main():\n testWin=PreviewWindowManager('debug')\n testWin.createWindow()\n front=IOSteam()\n n=10\n front.run()\n #cv2.namedWindow('DebugWindow0')\n \n for i in range(n):\n #front._update_frame()\n try:\n lastFrame=front.frame\n #lastFrame=front.framequeue[-1]\n 
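The frame property of IOSteam blocks on a threading.Event until the capture thread publishes a fresh frame, then clears the event so the next read waits for the next update. The same handoff pattern, distilled to a camera-free sketch with illustrative names:

import threading, time
from collections import deque

class LatestValue:
    def __init__(self, maxlen=30):
        self._fresh = threading.Event()
        self._queue = deque([], maxlen)

    def put(self, value):            # producer side
        self._queue.append(value)
        self._fresh.set()

    def get(self, timeout=None):     # consumer blocks until a *new* value arrives
        if not self._fresh.wait(timeout):
            return None
        self._fresh.clear()
        return self._queue[-1]

lv = LatestValue()
threading.Thread(target=lambda: (time.sleep(0.1), lv.put(42)), daemon=True).start()
print(lv.get(timeout=1.0))  # 42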
testWin.show(lastFrame)\n #-------------------------\n #------------------------- \n cv2.waitKey(1)\n #------------------------- \n #------------------------- \n #front.dumpFrame()#bug show frame is normally fetched\n except Exception as e:\n logging.debug('Error {}'.format(e))\n \n logging.info('%d frame has done'%n)\n \n \n lastFrame=front.frame\n front.dumpFrame(direct='./faceDumped/')\n testWin.show(lastFrame)\n \n front.stop()\n testWin.destroyWindow()\n\nif __name__=='__main__':\n main()\n\n","sub_path":"Python/OpenCVFaceRecognize/cameraIO.py","file_name":"cameraIO.py","file_ext":"py","file_size_in_byte":5731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57189483","text":"import unittest\nimport os\n\nfrom monty.serialization import loadfn\nfrom monty.json import jsanitize\nfrom maggma.stores import MemoryStore\nfrom maggma.runner import Runner\n\nfrom propnet.core.builder import PropnetBuilder\n\nTEST_DIR = os.path.dirname(os.path.abspath(__file__))\n\nclass BuilderTest(unittest.TestCase):\n def setUp(self):\n self.materials = MemoryStore()\n self.materials.connect()\n materials = loadfn(os.path.join(TEST_DIR, \"test_materials.json\"))\n materials = jsanitize(materials, strict=True, allow_bson=True)\n self.materials.update(materials)\n self.propstore = MemoryStore()\n self.propstore.connect()\n\n def test_serial_runner(self):\n builder = PropnetBuilder(self.materials, self.propstore)\n runner = Runner([builder])\n runner.run()\n\n def test_multiproc_runner(self):\n builder = PropnetBuilder(self.materials, self.propstore)\n runner = Runner([builder])\n runner.run()\n\n def test_process_item(self):\n item = self.materials.query_one(criteria={\"pretty_formula\": \"Cs\"})\n builder = PropnetBuilder(self.materials, self.propstore)\n processed = builder.process_item(item)\n self.assertIsNotNone(processed)\n # Ensure vickers hardness gets populated\n self.assertIn(\"vickers_hardness\", processed)\n if 'created_at' in item.keys():\n date_value = item['created_at']\n else:\n date_value = \"\"\n\n # Check that provenance values propagate correctly\n current_quantity = processed['vickers_hardness']['quantities'][0]\n at_deepest_level = False\n while not at_deepest_level:\n current_provenance = current_quantity['provenance']\n if current_provenance['inputs'] is not None:\n self.assertEqual(current_provenance['source']['source'],\n \"propnet\")\n self.assertEqual(current_provenance['source']['source_key'],\n current_quantity['internal_id'])\n self.assertNotIn(current_provenance['source']['date_created'],\n (\"\", None))\n current_quantity = current_provenance['inputs'][0]\n else:\n self.assertEqual(current_provenance['source']['source'],\n \"Materials Project\")\n self.assertEqual(current_provenance['source']['source_key'],\n item['task_id'])\n self.assertEqual(current_provenance['source']['date_created'],\n date_value)\n at_deepest_level = True\n\n\n\n # @unittest.skipIf(not os.path.isfile(\"runner.json\"), \"No runner file\")\n # def test_runner_pipeline(self):\n # from monty.serialization import loadfn\n # runner = loadfn(\"runner.json\")\n # runner.builders[0].connect()\n # items = list(runner.builders[0].get_items())\n # processed = runner.builders[0].process_item(items[0])\n # runner.run()\n\n # Just here for reference, in case anyone wants to create a new set\n # of test materials -jhm\n @unittest.skipIf(True, \"Skipping test materials creation\")\n def create_test_docs(self):\n formulas = [\"BaNiO3\", \"Si\", \"Fe2O3\", \"Cs\"]\n 
from maggma.advanced_stores import MongograntStore\n from monty.serialization import dumpfn\n mgstore = MongograntStore(\"ro:matgen2.lbl.gov/mp_prod\", \"materials\")\n builder = PropnetBuilder(\n mgstore, self.propstore, criteria={\"pretty_formula\": {\"$in\": formulas},\n \"e_above_hull\": 0})\n builder.connect()\n dumpfn(list(builder.get_items()), \"test_materials.json\")\n","sub_path":"propnet/core/tests/test_builder.py","file_name":"test_builder.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"78894987","text":"import socket\nclient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nhost = socket.gethostname()\nhost_ip = '131.179.33.30' # change to ip of server device if not being run locally\nport = 8080\nclient.connect((host_ip, 8080))\nclient.send('I am CLIENT'.encode())\nfrom_server = client.recv(4096)\nclient.close()\nprint(from_server.decode('ascii'))\n","sub_path":"Lab3/clientTest.py","file_name":"clientTest.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361471776","text":"# code heavily adapted from https://github.com/pengpaiSH/Kaggle_NCFM\nimport os\nimport numpy as np\nimport shutil\n\nnp.random.seed(59)\n\nroot_train = '../../localData/prj3/training_data/train_split_3d/'\nroot_val = '../../localData/prj3/training_data/val_split_3d/'\nroot_test = '../../localData/prj3/training_data/test_split_3d/'\n\nroot_total = '../../localData/prj3/training_data/raw_images_3d/'\n\nlabels = ['labradoodle', 'friedChicken']\n\nn_train_samples = 0\nn_val_samples = 0\nn_test_samples = 0\n\n# Training proportion\nsplit_proportion = (0.6,0.2,0.2) # make sure they sum up to 1.0\n\nfor label in labels:\n if label not in os.listdir(root_train):\n os.mkdir(os.path.join(root_train, label))\n #os.system(\"sudo mkdir \"+os.path.join(root_train, fish))\n\n total_images = os.listdir(os.path.join(root_total, label))\n\n n_train = int(len(total_images) * split_proportion[0])\n n_val = int(len(total_images)*split_proportion[1])\n \n np.random.shuffle(total_images)\n\n train_images = total_images[:n_train]\n val_images = total_images[n_train:(n_train+n_val)]\n test_images = total_images[(n_train+n_val):]\n\n for img in train_images:\n source = os.path.join(root_total, label, img)\n target = os.path.join(root_train, label, img)\n shutil.copy(source, target)\n n_train_samples += 1\n\n if label not in os.listdir(root_val):\n os.mkdir(os.path.join(root_val, label))\n\n for img in val_images:\n source = os.path.join(root_total, label, img)\n target = os.path.join(root_val, label, img)\n shutil.copy(source, target)\n n_val_samples += 1\n \n if label not in os.listdir(root_test):\n os.mkdir(os.path.join(root_test, label))\n\n for img in test_images:\n source = os.path.join(root_total, label, img)\n target = os.path.join(root_test, label, img)\n shutil.copy(source, target)\n n_test_samples += 1\n\nprint('Finish splitting train, val and test images!')\nprint('# training samples: {}, # val samples: {}, # test samples: {}'.format(n_train_samples, n_val_samples, n_test_samples))","sub_path":"lib/split_train_val_test.py","file_name":"split_train_val_test.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"124018650","text":"from eclib.gsw_lwe import *\nfrom eclib.modutils import *\nfrom eclib.colors import *\nimport 
eclib.figsetup\nimport numpy as np\nimport numpy.linalg as la\nfrom control.matlab import *\nimport matplotlib.pyplot as plt\n\n# sampling time\nTs = 10e-3\n\n# simulation setting\nsimulation_time = 10\nt = np.linspace(0, simulation_time - Ts, int(simulation_time / Ts))\n\n# plant (continuous time)\nA = np.array([[1, -1],\n [0, 2]])\nB = np.array([[0],\n [1]])\nC = np.array([[1, 0],\n [0, 1]])\nD = np.array([[0],\n [0]])\n\n# plant (discrete time)\nsys = c2d(ss(A, B, C, D), Ts)\nA = sys.A\nB = sys.B\nC = sys.C\nD = sys.D\n\n# dimension\nn = A.shape[0]\nm = B.shape[1]\nl = C.shape[0]\n\n# controller\nQ = np.diag(np.ones(n))\nR = np.diag(np.ones(m))\nX, _, _ = dare(A, B, Q, R)\nF = -la.inv(B.T @ X @ B + R) @ (B.T @ X @ A)\n\n# cryptosystem\nN = 10\nT = pow(2, 64)\nq = pow(2, 128)\nsigma = 3.2\nparams, pk, sk = keygen(N, T, q, sigma, N)\n\n# scaling parameter\ndelta = 1e-4\n\n# controller encryption\nF_enc = enc_gsw(params, pk, F, delta)\n\n# state\nx = 50 * np.ones([len(t) + 1, n])\nx_ = 50 * np.ones([len(t) + 1, n])\nx_enc = np.zeros(len(t), dtype=object)\n\n# input\nu = np.zeros([len(t), m])\nu_ = np.zeros([len(t), m])\nu_enc = np.zeros(len(t), dtype=object)\n\n# simulation w/o encryption\nfor k in range(len(t)):\n # controller\n u[k] = F @ x[k]\n # plant update\n x[k+1] = A @ x[k] + B @ u[k]\n\n# simulation w/ encryption\nfor k in range(len(t)):\n # state encryption\n x_enc[k] = enc(params, pk, x_[k], delta)\n # encrypted controller\n u_enc[k] = mult(params, F_enc, x_enc[k])\n # input decryption\n u_[k] = dec(params, sk, u_enc[k], delta ** 2)\n # plant update\n x_[k+1] = A @ x_[k] + B @ u_[k]\n\n# figure\nplt.figure()\nplt.plot(t, u, linestyle='-', color=gray, linewidth=3.0, label='unencrypted')\nplt.plot(t, u_, linestyle='-', color=blue, linewidth=1.0, label='encrypted')\nplt.plot(t, np.zeros(len(t)), linestyle='--', color=black, linewidth=0.5)\nplt.xlabel('Time (s)')\nplt.ylabel(r'$u$')\nplt.xlim(0, simulation_time)\nplt.legend(loc='lower right')\n# plt.savefig('./fig/enc_sf_input.eps', bbox_inches='tight', pad_inches=0.05, transparent=True)\n\nplt.figure()\nplt.plot(t, x[0:-1,0], linestyle='-', color=gray, linewidth=3.0, label='unencrypted')\nplt.plot(t, x[0:-1,1], linestyle='-', color=gray, linewidth=3.0)\nplt.plot(t, x_[0:-1,0], linestyle='-', color=blue, linewidth=1.0, label='encrypted')\nplt.plot(t, x_[0:-1,1], linestyle='-', color=blue, linewidth=1.0)\nplt.plot(t, np.zeros(len(t)), linestyle='--', color=black, linewidth=0.5)\nplt.xlabel('Time (s)')\nplt.ylabel(r'$x$')\nplt.xlim(0, simulation_time)\nplt.legend(loc='upper right')\n# plt.savefig('./fig/enc_sf_state.eps', bbox_inches='tight', pad_inches=0.05, transparent=True)\n\nplt.show()","sub_path":"examples/gsw_lwe/state_feedback.py","file_name":"state_feedback.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"536484681","text":"\"\"\"\nThis file provides the template for designing the agent and environment. The below hyperparameters must be assigned to a value for the algorithm to work properly.\n\"\"\"\n\nimport numpy as np\nfrom .environment import Environment\nfrom ..utils import check_validity\n\ndef design_env():\n\n \"\"\"\n 1. DESIGN AGENT\n\n The key hyperparameters for agent construction are\n\n a. Number of levels in agent hierarchy\n b. Max sequence length in which each policy will specialize\n c. Max number of atomic actions allowed in an episode\n d. 
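The state-feedback script above scales reals by delta before encryption and decodes the product of two encoded operands with delta**2. The fixed-point arithmetic alone, without any cryptosystem, looks like this; plain integers stand in for ciphertexts, and the function names are illustrative:

import numpy as np

def encode(x, delta=1e-4):
    # quantize reals to integers so homomorphic operations work over the integers
    return np.round(np.asarray(x) / delta).astype(np.int64)

def decode(q, delta=1e-4, power=1):
    # a product of k encoded factors carries a scale of delta**k
    return np.asarray(q) * delta ** power

F = np.array([[-0.8, -1.9]])
x = np.array([50.0, 50.0])
u = decode(encode(F) @ encode(x), power=2)  # mirrors dec(..., delta ** 2) above
print(u, F @ x)                             # both approximately [-135.]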
Environment timesteps per atomic action\n\n See Section 3 of this file for other agent hyperparameters that can be configured.\n \"\"\"\n\n \"\"\"\n 2. DESIGN ENVIRONMENT\n\n a. Designer must provide the original UMDP (S,A,T,G,R).\n - The S,A,T components can be fulfilled by providing the Mujoco model.\n - The user must separately specifiy the initial state space.\n - G can be provided by specifying the end goal space.\n - R, which by default uses a shortest path {-1,0} reward function, can be implemented by specifying two components: (i) a function that maps the state space to the end goal space and (ii) the end goal achievement thresholds for each dimensions of the end goal.\n\n b. In order to convert the original UMDP into a hierarchy of k UMDPs, the designer must also provide\n - The subgoal action space, A_i, for all higher-level UMDPs i > 0\n - R_i for levels 0 <= i < k-1 (i.e., all levels that try to achieve goals in the subgoal space). As in the original UMDP, R_i can be implemented by providing two components:(i) a function that maps the state space to the subgoal space and (ii) the subgoal achievement thresholds.\n\n c. Designer should also provide subgoal and end goal visualization functions in order to show video of training. These can be updated in \"display_subgoal\" and \"display_end_goal\" methods in the \"environment.py\" file.\n\n \"\"\"\n\n # Provide file name of Mujoco model(i.e., \"pendulum.xml\"). Make sure file is stored in \"mujoco_files\" folder\n model_name = \"ant_reacher.xml\"\n\n\n # Provide initial state space consisting of the ranges for all joint angles and velocities. In the Ant Reacher task, we use a random initial torso position and use fixed values for the remainder.\n\n initial_joint_pos = np.array([0, 0, 0.55, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, -1.0, 0.0, -1.0, 0.0, 1.0])\n initial_joint_pos = np.reshape(initial_joint_pos,(len(initial_joint_pos),1))\n initial_joint_ranges = np.concatenate((initial_joint_pos,initial_joint_pos),1)\n initial_joint_ranges[0] = np.array([2.5,21.5])\n initial_joint_ranges[1] = np.array([2.5,21.5])\n\n # Cocatenate velocity ranges\n initial_state_space = np.concatenate((initial_joint_ranges,np.zeros((len(initial_joint_ranges)-1,2))),0)\n\n initial_state_space_test = np.copy(initial_state_space)\n initial_state_space_test[0] = np.array([2.5,2.5])\n initial_state_space_test[1] = np.array([2.5,2.5])\n\n\n # Provide end goal space. The code supports two types of end goal spaces if user would like to train on a larger end goal space. If user needs to make additional customizations to the end goals, the \"get_next_goal\" method in \"environment.py\" can be updated.\n\n # In the UR5 reacher environment, the end goal will be the desired joint positions for the 3 main joints.\n max_range = 9.5\n goal_space_train = [[2.5,21.5],[2.5,21.5],[0.45,0.55]]\n goal_space_test = [[21.5,21.5],[21.5,21.5],[0.55,0.55]]\n\n\n # Provide a function that maps from the state space to the end goal space. This is used to (i) determine whether the agent should be given the sparse reward and (ii) for Hindsight Experience Replay to determine which end goal was achieved after a sequence of actions.\n project_state_to_end_goal = lambda sim, state: state[:3]\n\n # Set end goal achievement thresholds. 
If the agent is within the threshold for each dimension, the end goal has been achieved and the reward of 0 is granted.\n\n # For the Ant Reacher task, the end goal will be the desired (x,y) position of the torso\n len_threshold = 0.5\n height_threshold = 0.2\n end_goal_thresholds = np.array([len_threshold, len_threshold, height_threshold])\n\n\n # Provide range for each dimension of subgoal space in order to configure subgoal actor networks. Subgoal space can be the same as the state space or some other projection out of the state space.\n\n # The subgoal space in the Ant Reacher task is the desired (x,y,z) position and (x,y,z) translational velocity of the torso\n cage_max_dim = 11.75\n max_height = 1\n max_velo = 3\n subgoal_bounds = np.array([[0.25,23.75],[0.25,23.75],[0,max_height]])\n\n\n # Provide state to subgoal projection function.\n # a = np.concatenate((sim.data.qpos[:2], np.array([4 if sim.data.qvel[i] > 4 else -4 if sim.data.qvel[i] < -4 else sim.data.qvel[i] for i in range(3)])))\n project_state_to_subgoal = lambda sim, state: state[:3]\n # project_state_to_subgoal = lambda sim, state: np.concatenate((sim.data.qpos[:2], np.array([1 if sim.data.qpos[2] > 1 else sim.data.qpos[2]]), np.array([3 if sim.data.qvel[i] > 3 else -3 if sim.data.qvel[i] < -3 else sim.data.qvel[i] for i in range(2)])))\n\n\n # Set subgoal achievement thresholds\n velo_threshold = 0.5\n quat_threshold = 0.5\n # subgoal_thresholds = np.array([len_threshold, len_threshold, height_threshold, quat_threshold, quat_threshold, quat_threshold, quat_threshold, velo_threshold, velo_threshold, velo_threshold])\n subgoal_thresholds = np.array([len_threshold, len_threshold, height_threshold])\n\n\n # To properly visualize goals, update \"display_end_goal\" and \"display_subgoals\" methods in \"environment.py\"\n\n\n \"\"\"\n 3. SET MISCELLANEOUS HYPERPARAMETERS\n\n Below are some other agent hyperparameters that can affect results, including\n a. Subgoal testing percentage\n b. Subgoal penalty\n c. Exploration noise\n d. Replay buffer size\n \"\"\"\n # Dummy params so that the function works. 
\n max_actions = 500\n timesteps_per_action = 15 \n show = False\n\n # Ensure environment customization have been properly entered\n check_validity(model_name, goal_space_train, goal_space_test, end_goal_thresholds, initial_state_space, subgoal_bounds, subgoal_thresholds, max_actions, timesteps_per_action)\n\n\n # Instantiate and return agent and environment\n env = Environment(model_name, goal_space_train, goal_space_test, project_state_to_end_goal, end_goal_thresholds, initial_state_space, initial_state_space_test, subgoal_bounds, project_state_to_subgoal, subgoal_thresholds, max_actions, timesteps_per_action, show)\n\n # agent = Agent(FLAGS,env,agent_params)\n\n return env\n","sub_path":"sl_envs/hac/ant_reacher_simple_decentered/design_agent_and_env.py","file_name":"design_agent_and_env.py","file_ext":"py","file_size_in_byte":6794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205040355","text":"# -*- coding: utf8 -*-\n# GFKARI CARD CRAWLER\n# Version 1.0 - crawls images from gfkari.gamedbs.jp\n# Part of the GFKARIDATABASE project.\n\nimport scrapy\nimport json\nimport math\nimport hashlib\nimport datetime\nfrom gfimages.items import GfimagesItem\n\nclass GfimagesSpider(scrapy.Spider):\n name = 'gfimages'\n start_urls = []\n def __init__(self):\n for i in range(220):\n self.start_urls.append(\"http://gfkari.gamedbs.jp/girl/detail/\" + str(1 + i))\n\n def parse(self, response):\n urls = response.xpath(\"//section//a[@class='cl']/@href\").extract()\n titles = response.xpath(\"//section//a[@class='cl']/@title\").extract()\n for i in range(len(urls)):\n item = GfimagesItem()\n url = 'http://gfkari.gamedbs.jp' + urls[i]\n title = titles[i]\n request = scrapy.Request(url=url, callback=self.image_parser)\n start = title.find(\"No.\") + 3\n end = title.find(\" \")\n card_id = title[start:end]\n card_id.lstrip()\n item['image_url'] = url\n request.meta['card_id'] = card_id\n request.meta['item'] = item\n yield request\n\n def image_parser(self, response):\n item = response.meta['item']\n card_id = response.meta['card_id']\n # hash generation so people don't crawl my database\n # im salty that nobody has a easy-to-crawl db ok\n m = hashlib.md5()\n m.update(str(datetime.datetime.now()))\n image_name = m.hexdigest()\n item['card_id'] = card_id\n item['image_name'] = image_name\n yield item\n","sub_path":"gfimages/gfimages/spiders/GfimagesSpider.py","file_name":"GfimagesSpider.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"197433338","text":"#!/usr/bin/python3\nfrom ivy.std_api import IvySendMsg, IvyStop, IvyInit, IvyStart # grab Ivy functions\nfrom time import sleep # sleep = happiness\nfrom signal import signal, SIGINT, SIGTERM # grab signal functions\nfrom config import ivy_bus, null_cb # grab ivy variables\n\nrunning = True\n\n\ndef sendData():\n timer = 0\n while running:\n timer += 1\n # for test only\n # IvySendMsg(\"FCULATERAL Mode=SelectedHeading Val=50\")\n # IvySendMsg(\"FCULATERAL Mode=SelectedTrack Val=50\")\n # IvySendMsg(\"FCULATERAL Mode=Managed Val=0\")\n IvySendMsg(\"Time t={}s\".format(timer))\n IvySendMsg(\"StateVector x=0 y=0 z=12 Vp=118.3222 fpa=0 psi=0 phi=0\")\n IvySendMsg(\"WindComponent VWind=10 dirWind=200\")\n IvySendMsg(\"MagneticDeclinaison=0\")\n IvySendMsg(\"RollRateLim MaxRollRate=66 / MinRollRate=0\")\n IvySendMsg(\"FGS FgsPt x=120 y=10\")\n IvySendMsg(\"FGS FgsCap cap=50\")\n sleep(3)\n\n\n\ndef stop(*a):\n 
global running\n running = False\n IvyStop()\n\n\ndef main():\n signal(SIGINT, stop)\n signal(SIGTERM, stop)\n\n IvyInit(\"Sender\", \"Sender is ready to send!\", 0, null_cb, null_cb)\n IvyStart(ivy_bus)\n sleep(1.0)\n # send AP LAT data\n sendData()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"354384076","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\nimport numpy as np\nimport pandas as pd\nimport helper\n\n#------------------------------------------------------------------------------------------------\n\narid3 = ['MA0151.1', 'MA0601.1', 'PB0001.1']\ncebpb = ['MA0466.1', 'MA0466.2']\nfosl1 = ['MA0477.1']\ngabpa = ['MA0062.1', 'MA0062.2']\nmafk = ['MA0496.1', 'MA0496.2']\nmax1 = ['MA0058.1', 'MA0058.2', 'MA0058.3']\nmef2a = ['MA0052.1', 'MA0052.2', 'MA0052.3']\nnfyb = ['MA0502.1', 'MA0060.1', 'MA0060.2']\nsp1 = ['MA0079.1', 'MA0079.2', 'MA0079.3']\nsrf = ['MA0083.1', 'MA0083.2', 'MA0083.3']\nstat1 = ['MA0137.1', 'MA0137.2', 'MA0137.3', 'MA0660.1', 'MA0773.1']\nyy1 = ['MA0095.1', 'MA0095.2']\n\nmotifs = [[''],arid3, cebpb, fosl1, gabpa, mafk, max1, mef2a, nfyb, sp1, srf, stat1, yy1]\nmotifnames = [ '','arid3', 'cebpb', 'fosl1', 'gabpa', 'mafk', 'max', 'mef2a', 'nfyb', 'sp1', 'srf', 'stat1', 'yy1']\n\n#----------------------------------------------------------------------------------------------------\n\n\nall_models = ['cnn_1', 'cnn_2', 'cnn_4', 'cnn_10', 'cnn_25', 'cnn_50', 'cnn_100',\n 'cnn_50_2', 'cnn9_4', 'cnn9_25', 'cnn3_50', 'cnn3_2', 'cnn_2_1',\n 'cnn_25_60', 'cnn_25_90', 'cnn_25_120']\n\n\nnum_trials = 5\n\n# get performance statistics\nmean_roc_trial = {}\nmean_pr_trial = {}\n\nfor trial in range(num_trials):\n \n results_path = os.path.join('../results', 'synthetic_'+str(trial), 'results.tsv')\n df = pd.read_csv(results_path, delimiter='\\t')\n\n if trial == 0:\n for i, model in enumerate(df['model']):\n mean_pr_trial[model] = []\n mean_roc_trial[model] = []\n ave_roc = df['ave roc']\n ave_pr = df['ave pr']\n\n tmp_roc = []\n tmp_pr = []\n for i, model in enumerate(df['model']):\n mean,_,std = ave_pr[i].split('$')\n mean_pr_trial[model].append(float(mean))\n \n mean,_,std = ave_roc[i].split('$')\n mean_roc_trial[model].append(float(mean))\n\n\nprint('Synthetic results')\nfor model_name in all_models:\n trial_match_any = []\n trial_qvalue = []\n trial_match_fraction = []\n trial_coverage = []\n for trial in range(num_trials):\n\n # save path\n results_path = os.path.join('../results', 'synthetic_'+str(trial))\n save_path = os.path.join(results_path, 'conv_filters')\n\n file_path = os.path.join(save_path, model_name, 'tomtom.tsv')\n best_qvalues, best_match, min_qvalue, match_fraction = helper.match_hits_to_ground_truth(file_path, motifs)\n \n # store results\n trial_qvalue.append(min_qvalue)\n trial_match_fraction.append(match_fraction)\n trial_coverage.append((len(np.where(min_qvalue != 1)[0])-1)/12) # percentage of motifs that are covered\n df = pd.read_csv(os.path.join(file_path), delimiter='\\t')\n trial_match_any.append((len(np.unique(df['Query_ID']))-3)/30) # -3 is because new version of tomtom adds 3 lines of comments under Query_ID \n\n print(\"%s & %.3f$\\pm$%.3f & %.3f$\\pm$%.3f & %.3f$\\pm$%.3f \\\\\\\\\"%(model_name, \n np.mean(mean_roc_trial[model_name]),\n 
np.std(mean_roc_trial[model_name]),\n np.mean(trial_match_any), \n np.std(trial_match_any),\n np.mean(trial_match_fraction), \n np.std(trial_match_fraction) ) )\n\n\n\n\n","sub_path":"code/3_match_statistics_synthetic.py","file_name":"3_match_statistics_synthetic.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"453987929","text":"import autofit as af\nfrom toy_gaussian.src.pipeline import phase_tagging\nfrom toy_gaussian.src.pipeline.phase import dataset\nfrom toy_gaussian.src.pipeline.phase.imaging.analysis import Analysis\nfrom toy_gaussian.src.pipeline.phase.imaging.meta_imaging_fit import MetaImagingFit\nfrom toy_gaussian.src.pipeline.phase.imaging.result import Result\n\n\nclass PhaseImaging(dataset.PhaseDataset):\n gaussians = af.PhaseProperty(\"gaussians\")\n\n Analysis = Analysis\n Result = Result\n\n @af.convert_paths\n def __init__(\n self,\n paths,\n *,\n gaussians=None,\n optimizer_class=af.MultiNest,\n sub_size=2,\n signal_to_noise_limit=None,\n bin_up_factor=None,\n ):\n\n \"\"\"\n\n A phase in an lens pipeline. Uses the set non_linear optimizer to try to fit models and hyper_gaussians\n passed to it.\n\n Parameters\n ----------\n optimizer_class: class\n The class of a non_linear optimizer\n sub_size: int\n The side length of the subgrid\n \"\"\"\n\n phase_tag = phase_tagging.phase_tag_from_phase_settings(\n sub_size=sub_size,\n signal_to_noise_limit=signal_to_noise_limit,\n bin_up_factor=bin_up_factor,\n )\n paths.phase_tag = phase_tag\n\n super().__init__(paths, gaussians=gaussians, optimizer_class=optimizer_class)\n\n self.meta_imaging_fit = MetaImagingFit(\n model=self.model,\n bin_up_factor=bin_up_factor,\n sub_size=sub_size,\n signal_to_noise_limit=signal_to_noise_limit,\n )\n\n # noinspection PyMethodMayBeStatic,PyUnusedLocal\n def modify_image(self, image, results):\n \"\"\"\n Customize an masked_imaging. e.g. removing lens light.\n\n Parameters\n ----------\n image: scaled_array.ScaledSquarePixelArray\n An masked_imaging that has been masked\n results: autofit.tools.pipeline.ResultsCollection\n The result of the previous lens\n\n Returns\n -------\n masked_imaging: scaled_array.ScaledSquarePixelArray\n The modified image (not changed by default)\n \"\"\"\n return image\n\n def make_analysis(self, dataset, mask, results=None):\n \"\"\"\n Create an lens object. 
Also calls the prior passing and masked_imaging modifying functions to allow child\n classes to change the behaviour of the phase.\n\n Parameters\n ----------\n mask: Mask\n The default masks passed in by the pipeline\n dataset: im.Imaging\n An masked_imaging that has been masked\n results: autofit.tools.pipeline.ResultsCollection\n The result from the previous phase\n\n Returns\n -------\n lens : Analysis\n An lens object that the non-linear optimizer calls to determine the fit of a set of values\n \"\"\"\n self.meta_imaging_fit.model = self.model\n modified_image = self.modify_image(image=dataset.image, results=results)\n\n masked_imaging = self.meta_imaging_fit.masked_dataset_from(\n dataset=dataset, mask=mask, results=results, modified_image=modified_image\n )\n\n self.output_phase_info()\n\n analysis = self.Analysis(\n masked_imaging=masked_imaging,\n image_path=self.optimizer.paths.image_path,\n results=results,\n )\n\n return analysis\n\n def output_phase_info(self):\n\n file_phase_info = \"{}/{}\".format(\n self.optimizer.paths.phase_output_path, \"phase.info\"\n )\n\n with open(file_phase_info, \"w\") as phase_info:\n phase_info.write(\"Optimizer = {} \\n\".format(type(self.optimizer).__name__))\n phase_info.write(\n \"Sub-grid size = {} \\n\".format(self.meta_imaging_fit.sub_size)\n )\n\n phase_info.close()\n","sub_path":"toy_gaussian/src/pipeline/phase/imaging/phase.py","file_name":"phase.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"576335273","text":"# Authors:\n# Rob Crittenden \n#\n# Copyright (C) 2010 Red Hat\n# see file 'COPYING' for use and warranty information\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\n# Certificates should be stored internally DER-encoded. We can be passed\n# a certificate several ways: read if from LDAP, read it from a 3rd party\n# app (dogtag, candlepin, etc) or as user input. 
The normalize_certificate()\n# function will convert an incoming certificate to DER-encoding.\n\n# Conventions\n#\n# Where possible the following naming conventions are used:\n#\n# cert: the certificate is a PEM-encoded certificate\n# dercert: the certificate is DER-encoded\n# nsscert: the certificate is an NSS Certificate object\n# rawcert: the cert is in an unknown format\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport base64\nimport re\n\nimport nss.nss as nss\nfrom nss.error import NSPRError\nfrom pyasn1.type import univ, namedtype, tag\nfrom pyasn1.codec.der import decoder, encoder\nimport six\n\nfrom ipapython import ipautil\nfrom ipalib import api\nfrom ipalib import _\nfrom ipalib import util\nfrom ipalib import errors\nfrom ipaplatform.paths import paths\nfrom ipapython.dn import DN\n\nPEM = 0\nDER = 1\n\nPEM_REGEX = re.compile(r'(?<=-----BEGIN CERTIFICATE-----).*?(?=-----END CERTIFICATE-----)', re.DOTALL)\n\nEKU_SERVER_AUTH = '1.3.6.1.5.5.7.3.1'\nEKU_CLIENT_AUTH = '1.3.6.1.5.5.7.3.2'\nEKU_CODE_SIGNING = '1.3.6.1.5.5.7.3.3'\nEKU_EMAIL_PROTECTION = '1.3.6.1.5.5.7.3.4'\nEKU_ANY = '2.5.29.37.0'\nEKU_PLACEHOLDER = '1.3.6.1.4.1.3319.6.10.16'\n\n_subject_base = None\n\ndef subject_base():\n global _subject_base\n\n if _subject_base is None:\n config = api.Command['config_show']()['result']\n _subject_base = DN(config['ipacertificatesubjectbase'][0])\n\n return _subject_base\n\ndef valid_issuer(issuer):\n if not api.Command.ca_is_enabled()['result']:\n return True\n # Handle all supported forms of issuer -- currently dogtag only.\n if api.env.ra_plugin == 'dogtag':\n return DN(issuer) == DN(('CN', 'Certificate Authority'), subject_base())\n return True\n\ndef strip_header(pem):\n \"\"\"\n Remove the header and footer from a certificate.\n \"\"\"\n s = pem.find(\"-----BEGIN CERTIFICATE-----\")\n if s >= 0:\n e = pem.find(\"-----END CERTIFICATE-----\")\n pem = pem[s+27:e]\n\n return pem\n\ndef initialize_nss_database(dbdir=None):\n \"\"\"\n Initializes NSS database, if not initialized yet. 
Uses a proper database\n directory (.ipa/alias or HTTPD_ALIAS_DIR), depending on the value of\n api.env.in_tree.\n \"\"\"\n\n if not nss.nss_is_initialized():\n if dbdir is None:\n if 'in_tree' in api.env:\n if api.env.in_tree:\n dbdir = api.env.dot_ipa + os.sep + 'alias'\n else:\n dbdir = paths.HTTPD_ALIAS_DIR\n nss.nss_init(dbdir)\n else:\n nss.nss_init_nodb()\n else:\n nss.nss_init(dbdir)\n\ndef load_certificate(data, datatype=PEM, dbdir=None):\n \"\"\"\n Given a base64-encoded certificate, with or without the\n header/footer, return a request object.\n\n Returns a nss.Certificate type\n \"\"\"\n if type(data) in (tuple, list):\n data = data[0]\n\n if (datatype == PEM):\n data = strip_header(data)\n data = base64.b64decode(data)\n\n initialize_nss_database(dbdir=dbdir)\n\n if six.PY2:\n return nss.Certificate(buffer(data))\n else:\n # In python 3 , `bytes` has the buffer interface\n return nss.Certificate(data)\n\ndef load_certificate_from_file(filename, dbdir=None):\n \"\"\"\n Load a certificate from a PEM file.\n\n Returns a nss.Certificate type\n \"\"\"\n fd = open(filename, 'r')\n data = fd.read()\n fd.close()\n\n return load_certificate(data, PEM, dbdir)\n\ndef load_certificate_list(data, dbdir=None):\n certs = PEM_REGEX.findall(data)\n certs = [load_certificate(cert, PEM, dbdir) for cert in certs]\n return certs\n\ndef load_certificate_list_from_file(filename, dbdir=None):\n \"\"\"\n Load a certificate list from a PEM file.\n\n Returns a list of nss.Certificate objects.\n \"\"\"\n fd = open(filename, 'r')\n data = fd.read()\n fd.close()\n\n return load_certificate_list(data, dbdir)\n\ndef get_subject(certificate, datatype=PEM, dbdir=None):\n \"\"\"\n Load an X509.3 certificate and get the subject.\n \"\"\"\n\n nsscert = load_certificate(certificate, datatype, dbdir)\n subject = nsscert.subject\n del(nsscert)\n return subject\n\ndef get_issuer(certificate, datatype=PEM, dbdir=None):\n \"\"\"\n Load an X509.3 certificate and get the issuer.\n \"\"\"\n\n nsscert = load_certificate(certificate, datatype, dbdir)\n issuer = nsscert.issuer\n del(nsscert)\n return issuer\n\ndef get_serial_number(certificate, datatype=PEM, dbdir=None):\n \"\"\"\n Return the decimal value of the serial number.\n \"\"\"\n nsscert = load_certificate(certificate, datatype, dbdir)\n serial_number = nsscert.serial_number\n del(nsscert)\n return serial_number\n\ndef is_self_signed(certificate, datatype=PEM, dbdir=None):\n nsscert = load_certificate(certificate, datatype, dbdir)\n self_signed = (nsscert.issuer == nsscert.subject)\n del nsscert\n return self_signed\n\nclass _TBSCertificate(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType(\n 'version',\n univ.Integer().subtype(explicitTag=tag.Tag(\n tag.tagClassContext, tag.tagFormatSimple, 0))),\n namedtype.NamedType('serialNumber', univ.Integer()),\n namedtype.NamedType('signature', univ.Sequence()),\n namedtype.NamedType('issuer', univ.Sequence()),\n namedtype.NamedType('validity', univ.Sequence()),\n namedtype.NamedType('subject', univ.Sequence()),\n namedtype.NamedType('subjectPublicKeyInfo', univ.Sequence()),\n namedtype.OptionalNamedType(\n 'issuerUniquedID',\n univ.BitString().subtype(implicitTag=tag.Tag(\n tag.tagClassContext, tag.tagFormatSimple, 1))),\n namedtype.OptionalNamedType(\n 'subjectUniquedID',\n univ.BitString().subtype(implicitTag=tag.Tag(\n tag.tagClassContext, tag.tagFormatSimple, 2))),\n namedtype.OptionalNamedType(\n 'extensions',\n univ.Sequence().subtype(explicitTag=tag.Tag(\n tag.tagClassContext, 
tag.tagFormatSimple, 3))),\n )\n\nclass _Certificate(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('tbsCertificate', _TBSCertificate()),\n namedtype.NamedType('signatureAlgorithm', univ.Sequence()),\n namedtype.NamedType('signature', univ.BitString()),\n )\n\ndef _get_der_field(cert, datatype, dbdir, field):\n cert = load_certificate(cert, datatype, dbdir)\n cert = cert.der_data\n cert = decoder.decode(cert, _Certificate())[0]\n field = cert['tbsCertificate'][field]\n field = encoder.encode(field)\n return field\n\ndef get_der_subject(cert, datatype=PEM, dbdir=None):\n return _get_der_field(cert, datatype, dbdir, 'subject')\n\ndef get_der_issuer(cert, datatype=PEM, dbdir=None):\n return _get_der_field(cert, datatype, dbdir, 'issuer')\n\ndef get_der_serial_number(cert, datatype=PEM, dbdir=None):\n return _get_der_field(cert, datatype, dbdir, 'serialNumber')\n\ndef get_der_public_key_info(cert, datatype=PEM, dbdir=None):\n return _get_der_field(cert, datatype, dbdir, 'subjectPublicKeyInfo')\n\ndef get_ext_key_usage(certificate, datatype=PEM, dbdir=None):\n nsscert = load_certificate(certificate, datatype, dbdir)\n if not nsscert.extensions:\n return None\n\n for ext in nsscert.extensions:\n if ext.oid_tag == nss.SEC_OID_X509_EXT_KEY_USAGE:\n break\n else:\n return None\n\n eku = nss.x509_ext_key_usage(ext.value, nss.AsDottedDecimal)\n eku = set(o[4:] for o in eku)\n return eku\n\ndef make_pem(data):\n \"\"\"\n Convert a raw base64-encoded blob into something that looks like a PE\n file with lines split to 64 characters and proper headers.\n \"\"\"\n pemcert = '\\r\\n'.join([data[x:x+64] for x in range(0, len(data), 64)])\n return '-----BEGIN CERTIFICATE-----\\n' + \\\n pemcert + \\\n '\\n-----END CERTIFICATE-----'\n\ndef normalize_certificate(rawcert):\n \"\"\"\n Incoming certificates should be DER-encoded. If not it is converted to\n DER-format.\n\n Note that this can't be a normalizer on a Param because only unicode\n variables are normalized.\n \"\"\"\n if not rawcert:\n return None\n\n rawcert = strip_header(rawcert)\n\n if util.isvalid_base64(rawcert):\n try:\n dercert = base64.b64decode(rawcert)\n except Exception as e:\n raise errors.Base64DecodeError(reason=str(e))\n else:\n dercert = rawcert\n\n # At this point we should have a certificate, either because the data\n # was base64-encoded and now its not or it came in as DER format.\n # Let's decode it and see. 
Fetching the serial number will pass the\n # certificate through the NSS DER parser.\n validate_certificate(dercert, datatype=DER)\n\n return dercert\n\n\ndef validate_certificate(cert, datatype=PEM, dbdir=None):\n \"\"\"\n Perform certificate validation by trying to load it into NSS database\n \"\"\"\n try:\n load_certificate(cert, datatype=datatype, dbdir=dbdir)\n except NSPRError as nsprerr:\n if nsprerr.errno == -8183: # SEC_ERROR_BAD_DER\n raise errors.CertificateFormatError(\n error=_('improperly formatted DER-encoded certificate'))\n else:\n raise errors.CertificateFormatError(error=str(nsprerr))\n\n\ndef write_certificate(rawcert, filename):\n \"\"\"\n Write the certificate to a file in PEM format.\n\n The cert value can be either DER or PEM-encoded, it will be normalized\n to DER regardless, then back out to PEM.\n \"\"\"\n dercert = normalize_certificate(rawcert)\n\n try:\n fp = open(filename, 'w')\n fp.write(make_pem(base64.b64encode(dercert)))\n fp.close()\n except (IOError, OSError) as e:\n raise errors.FileError(reason=str(e))\n\ndef write_certificate_list(rawcerts, filename):\n \"\"\"\n Write a list of certificates to a file in PEM format.\n\n The cert values can be either DER or PEM-encoded, they will be normalized\n to DER regardless, then back out to PEM.\n \"\"\"\n dercerts = [normalize_certificate(rawcert) for rawcert in rawcerts]\n\n try:\n with open(filename, 'w') as f:\n for cert in dercerts:\n cert = base64.b64encode(cert)\n cert = make_pem(cert)\n f.write(cert + '\\n')\n except (IOError, OSError) as e:\n raise errors.FileError(reason=str(e))\n\ndef verify_cert_subject(ldap, hostname, dercert):\n \"\"\"\n Verify that the certificate issuer we're adding matches the issuer\n base of our installation.\n\n This assumes the certificate has already been normalized.\n\n This raises an exception on errors and returns nothing otherwise.\n \"\"\"\n nsscert = load_certificate(dercert, datatype=DER)\n subject = str(nsscert.subject)\n issuer = str(nsscert.issuer)\n del(nsscert)\n\n if (not valid_issuer(issuer)):\n raise errors.CertificateOperationError(error=_('Issuer \"%(issuer)s\" does not match the expected issuer') % \\\n {'issuer' : issuer})\n\nclass _Extension(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('extnID', univ.ObjectIdentifier()),\n namedtype.NamedType('critical', univ.Boolean()),\n namedtype.NamedType('extnValue', univ.OctetString()),\n )\n\ndef _encode_extension(oid, critical, value):\n ext = _Extension()\n ext['extnID'] = univ.ObjectIdentifier(oid)\n ext['critical'] = univ.Boolean(critical)\n ext['extnValue'] = univ.OctetString(value)\n ext = encoder.encode(ext)\n return ext\n\nclass _ExtKeyUsageSyntax(univ.SequenceOf):\n componentType = univ.ObjectIdentifier()\n\ndef encode_ext_key_usage(ext_key_usage):\n eku = _ExtKeyUsageSyntax()\n for i, oid in enumerate(ext_key_usage):\n eku[i] = univ.ObjectIdentifier(oid)\n eku = encoder.encode(eku)\n return _encode_extension('2.5.29.37', EKU_ANY not in ext_key_usage, eku)\n\nif __name__ == '__main__':\n # this can be run with:\n # python ipalib/x509.py < /etc/ipa/ca.crt\n\n from ipalib import api\n api.bootstrap()\n api.finalize()\n\n nss.nss_init_nodb()\n\n # Read PEM certs from stdin and print out its components\n\n certlines = sys.stdin.readlines()\n cert = ''.join(certlines)\n\n nsscert = load_certificate(cert)\n\n 
print(nsscert)\n","sub_path":"ipalib/x509.py","file_name":"x509.py","file_ext":"py","file_size_in_byte":13185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"356444379","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.graph_objs as go\nimport pandas as pd\ndf = pd.read_csv('https://raw.githubusercontent.com/bijoordzen/temperatureplot/main/temp.csv')\np =(df['temp'])\nq = (df['date and time'])\na = p[:50]\nb = q[:50]\napp = dash.Dash()\nserver = app.server\n\napp.layout = html.Div([html.H1(\"Team B\"),\n\t\t\t html.Div(\"Sensor Plots\"),\n html.Label('Choose Data to Display'),\n dcc.Dropdown(\n id = 'first-dropdown',\n options =[\n {'label':'SD Card Temeprature Sensor Data ','value': 'sdcard'},\n {'label':'Realtime Temeprature Sensor Data ','value': 'lm35rlt'},\n {'label':'Coolant Temperature','value': 'coolant'},\n {'label':'Fuel Temperature','value': 'fuel'}\n ],\n multi = True\n ),\n\t\t\t dcc.Graph(\n id='graph',\n figure={\n 'data': [\n {'x': b, 'y': a, 'type': 'line'},\n ],\n 'layout': {\n 'title': 'Temperature vs Time'\n }\n }\n )\n \n])\n\nif __name__ == '__main__':\n\tapp.run_server()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"416889238","text":"# https://atcoder.jp/contests/sumitrust2019/tasks/sumitb2019_c\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda:sys.stdin.readline().rstrip()\ndef resolve():\n x=int(input())\n for k in range(1,1001):\n if(100*k<=x<=105*k):\n print(1)\n return\n print(0)\nresolve()\n","sub_path":"三井住友信託銀行プログラミングコンテスト2019/c_100_to_105.py","file_name":"c_100_to_105.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"3186763","text":"from locadora.settings.common import *\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = ')6g95h^0k@gjw5p!e9(b2u*j0@2spcb^9$eu2ht7ya@zud5y36'\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\nGRAPH_MODELS = {\n 'all_applications': True,\n 'group_models': True,\n}\n","sub_path":"locadora/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190772254","text":" #\n # [o]\n # [o, l]\n # [o, l, l]\n # [o, l, l, e]\n # [o, l, l, e, H]\n # [o, l, l, e]\n # [o, l, l]\n # [o, l]\n # [o]\n\n\ndef lazy_triangle(text):\n first_half = [[i for i in text[::-1][0:len(text)-j]]for j in range(len(text[::-1]))][::-1]\n second_half = [[i for i in text[len(text)-1-j:len(text)]][::-1]for j in range(len(text)-1)][::-1]\n result = first_half + second_half\n for i in result:\n print(i)\n\n\nif __name__ == '__main__':\n lazy_triangle(\"hello\")","sub_path":"dojo_triangle_array/dojo_triangle.py","file_name":"dojo_triangle.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"164693701","text":"import sublime\nimport subprocess\n\nfrom threading import 
Thread\nfrom ..client.ClientManager import get_client_manager\n\nclass ServerManager:\n\n\t_instance = None\n\n\tdef run(self):\t\n\t\tthread = Thread(target = self.startserver, args=())\n\t\tthread.start()\n\t\tprint(\"INFO: SERVER THREAD STARTED.\")\n\n\tdef startserver(self):\n\t\ttry:\n\t\t\tsubprocess.call([get_client_manager().server_path, str(get_client_manager().port), 'True'])\n\n\t\texcept OSError:\n\t\t\tif (get_client_manager().server_init_error == False):\n\t\t\t\tsublime.message_dialog(\"The the servers path not valid! Please give it below.\")\n\t\t\t\tsublime.active_window().run_command(\"set_server_path\")\n\ndef get_server_manager():\n\tif ServerManager._instance is None:\n\t\tServerManager._instance = ServerManager()\n\treturn ServerManager._instance\n\ndef get_a_new_server_manager():\n\tServerManager._instance = ServerManager()\n\treturn ServerManager._instance","sub_path":"server/ServerManager.py","file_name":"ServerManager.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496022689","text":"import pickle\nfrom pickleHack import Record\nfrom flask import Flask, request, render_template\nfrom bs4 import BeautifulSoup\nimport urllib\nimport subprocess\nfrom subprocess import call\nimport re\nfrom flask import make_response\n\n\napp = Flask(__name__)\n\n\n#This makes an endpoint that the form will go to\n@app.route(\"/form\", methods=[\"POST\"])\ndef form():\n\tprint (request.form)\n\ttry:\n\t\tvalues = request.form\n\t\tprint(values)\n\t\tsingleRecord = Record(values['age'], values['procedure'], values['zipcode'],values['gender'], values['race'], values['height'], values['weight'], values['medication'],values['dosage'],values['doses'])\n\t\t\n\t\trecords.append(singleRecord)\n\t\tpickle.dump(records,open('records.pickle','wb'))\n\t\tdose = singleRecord.doses.strip().upper()\n\t\tprint(\"asdfa\"+dose)\n\t\tclass AppURLopener(urllib.FancyURLopener):version = \"Mozilla/5.0\"\n\t\turllib._urlopener = AppURLopener()\n\t\tf = urllib.urlopen(\"http://datawrapper.dwcdn.net/aL51p/3/\")\n\t\tmyfile = f.read()\n\t\topioids = \"Hydrocodone (Vicodin and Norco) 5 mg tablet\\tCodeine (Tylenol #3) 30 mg tablets\\tTramadol 50 mg tablets\\tOxycodne 5 mg tablets\"\n\t\tnewfile= \"\\nLaparoscopic Cholecystectomy\\t15\\t15\\t15\\t10\\nLaparoscopic Appendectomy\\t15\\t15\\t15\\t10\\nInguinal\\/Femoral Hernia Repair (open\\/laparoscopic)\\t15\\t15\\t15\\t10\\nOpen Incisional Hernia Repair\\t40\\t40\\t40\\t25\\nLaparoscopic Colectomy\\t35\\t35\\t35\\t25\\nOpen Colectomy\\t40\\t40\\t40\\t25\\nHysterectomy vaginal \\t20\\t20\\t20\\t15\\nHyseretomy Laparoscopic & Robotic\\t30\\t30\\t30\\t20\\nHysterectomy Abdominal \\t40\\t40\\t40\\t25\\nWide Local Excision \\u00b1 Sentinel Lymph Node Biopsy\\t30\\t30\\t30\\t20\\nSimple Mastectomy \\u00b1 Sentinel Lymph Node Biopsy\\t30\\t30\\t30\\t20\\nLumpectomy \\u00b1 Sentinel Lymph Node Biopsy\\t15\\t15\\t15\\t10\\nBreast Biopsy or Sentinel Lymph Node Biopsy\\t15\\t15\\t15\\t10\"\n\t\tmydata = newfile.split(\"\\n\")\n\t\topioidString = opioids.split(\"\\t\")\n\t\tfor line in mydata:\n\t\t\tcell = line.split(\"\\t\")\n\t\t\tif cell[0].strip().upper() == singleRecord.procedure.strip().upper():\n\t\t\t\tfor med in opioidString:\t\n\t\t\t\t\tif med.strip().upper() == singleRecord.medication.strip().upper():\n\t\t\t\t\t\t#return \"





Here are your results... If you are no longer in pain, but still have extra medication, you can visit \" + \"http://disposemymeds.org/medicine-disposal-locator/\" + \" to find the closest facility to safely dispense of them. This is your procedure and the amount of pills you SHOULD be prescribed after surgery: \" + cell[0] + \" , \" + med + \" , \" + cell[1] + \" tablets.\"\n\t\t\t\t\t\ttablets = float(re.sub(\"[^0-9.]\", \"\", med))*float(cell[1])/float(singleRecord.dosage.strip().upper())\n\t\t\t\t\t\tresponse = make_response(render_template('graphs.html', cell=cell, med=med, tablets = round(tablets,1)))\n\t\t\t\t\t\t\n\t\t\t\t\t\treturn response\n\t\t\t\t\t\t# pro = cell[0] \n\t\t\t\t\t\t# medi = med\n\t\t\t\t\t\t# s = \"\"\"
Here are your results...\n\t\t\t\t\t\t# \t
Here are your results...= topk:\n break\n similar_word = self._idx_to_vocab[similar_idx]\n similars.append((similar_word, 1-dist[similar_idx]))\n\n return similars\n\n def infer_wordvec(self, word2vec_corpus, word_set, append=True):\n \"\"\"\n :param word2vec_corpus: utils.Word2VecCorpus (like)\n It yield sent. The form of sent is list of str\n :param word_set: set of str\n Words that we want to infer vectors\n :param append: Boolean\n If True, vector of unseen words are stored in model.\n\n It returns\n ----------\n y : numpy.ndarray\n Inferred word vectors. (n_words, size)\n \"\"\"\n\n WW, idx_to_vocab_ = self.vectorize_word_context_matrix(\n word2vec_corpus, word_set)\n\n self.infer_wordvec_from_vector(WW, idx_to_vocab_, append)\n\n def infer_wordvec_from_vector(self, X, row_to_vocab=None, append=True):\n \"\"\"\n :param X: scipy.sparse.csr_matrix\n (word, context) cooccurrance matrix\n :param row_to_vocab: list of str\n Word index that corresponds row of X\n :param append: Boolean\n If True, vector of unseen words are stored in model.\n\n It returns\n ----------\n y : numpy.ndarray\n Inferred word vectors. (n_words, size)\n \"\"\"\n\n if (append) and (row_to_vocab is None):\n raise ValueError('row_to_vocab should be inserted if append = True')\n\n pmi_ww, _, _ = train_pmi(X,\n py=self._py, beta=1, min_pmi=0)\n\n y = safe_sparse_dot(pmi_ww, self._transformer)\n\n if append:\n n = self.wv.shape[0]\n idx_ = [i for i, vocab in enumerate(row_to_vocab)\n if not (vocab in self._vocab_to_idx_)]\n\n # if exist no word to be appended\n if not idx_:\n return y\n\n vocabs_ = [row_to_vocab[i] for i in idx_]\n vec_ = y[np.asarray(idx_)]\n\n self._idx_to_vocab += vocabs_\n for i, vocab in enumerate(vocabs_):\n self._vocab_to_idx_[vocab] = n + i\n self.wv = np.vstack([self.wv, vec_])\n self.n_vocabs += len(idx_)\n\n if self._verbose:\n print('%d terms are appended' % len(vocabs_))\n\n return y\n\n def vectorize_word_context_matrix(self, word2vec_corpus, word_set):\n \"\"\"\n :param word2vec_corpus: utils.Word2VecCorpus (like)\n It yield sent. The form of sent is list of str\n :param word_set: set of str\n Words that we want to infer vectors\n\n It returns\n ----------\n WW : scipy.sparse.csr_matrix\n WW[word, context] = frequency\n idx_to_vocab_ : list of str\n Word list that corresponds rows of WW\n \"\"\"\n\n WWd = word_context(\n sents = word2vec_corpus,\n windows = self._window,\n dynamic_weight = self._dynamic_weight,\n verbose = self._verbose,\n vocab_to_idx = self._vocab_to_idx,\n row_vocabs = word_set\n )\n\n idx_to_vocab_ = [vocab for vocab in sorted(word_set)]\n vocab_to_idx_ = {vocab:idx for idx, vocab in enumerate(idx_to_vocab_)}\n\n WW = dict_to_sparse(\n dd = WWd,\n row_to_idx = vocab_to_idx_,\n col_to_idx = self._vocab_to_idx)\n\n return WW, idx_to_vocab_\n","sub_path":"text_embedding/word2vec.py","file_name":"word2vec.py","file_ext":"py","file_size_in_byte":9883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"249549715","text":"import torch\n\n\ndef accuracy_score(y_true: torch.Tensor, y_pred: torch.Tensor) -> float:\n \"\"\"Implementation of accuracy score for torch.Tensor.\n\n Prameters\n ---------\n y_true: torch.Tensor\n Truth label with 1D.\n\n y_pred: torch.Tensor\n Predicted label with 1D or 2D(batch, class, ).\n\n Returns\n -------\n accuracy: float\n Accuracy score.\n \"\"\"\n if len(y_true) != len(y_pred):\n raise ValueError(f'y_true and y_pred are inconsistent length, must be'\n f' same length. 
Got length y_true:{len(y_true)} and'\n f'y_pred: {len(y_pred)})')\n\n if y_pred.dim() == 2:\n _, y_pred = y_pred.max(dim=1)\n y_pred = y_pred.float()\n elif y_pred.dim() >= 3:\n raise ValueError(f'The shape of y_pred must be (batch, class, ) or '\n f'(batch, ). But got shape {y_pred.size()}')\n\n acc = torch.eq(y_true, y_pred).float().mean().item()\n return acc","sub_path":"src/metrics/accuracy.py","file_name":"accuracy.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"333710177","text":"import argparse\nimport os\nfrom collections import OrderedDict\nfrom pprint import pprint\nimport warnings\n# warnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore')\nimport tensorflow as tf\n# For pybullet envs\nwarnings.filterwarnings(\"ignore\")\nimport gym\nimport mujoco_py\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\nimport numpy as np\nimport yaml\n\nfrom stable_baselines import TRPO, OPOLO, TRPOGAIFO, PPO2, HER\nfrom stable_baselines.common import set_global_seeds\nfrom stable_baselines.results_plotter import load_results, ts2xy\nfrom stable_baselines.common.vec_env import DummyVecEnv\nfrom stable_baselines.common.evaluation import evaluate_policy\n\nfrom simulation_grounding.atp_envs import GroundedEnv, MujocoNormalized\n\nbest_mean_reward, n_steps = -np.inf, 0\n\ndef eval_real_callback(log_dir, eval_real_env, eval_grnd_env, n_eval_episodes = 5):\n def callback(_locals, _globals):\n \"\"\"\n Callback called at each step (for DQN an others) or after n steps (see ACER or PPO2)\n :param _locals: (dict)\n :param _globals: (dict)\n \"\"\"\n global n_steps\n # Print stats every 20 calls\n if (n_steps + 1) % 1 == 0:\n # Evaluate policy training performance\n episode_rewards, episode_lengths = evaluate_policy(_locals['self'], eval_real_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at target: {:.2f}\".format(episode_rewards))\n\n episode_rewards_grnd, episode_lengths_grnd = evaluate_policy(_locals['self'], eval_grnd_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at grounded environment: {:.2f}\".format(episode_rewards_grnd))\n\n with open(os.path.join(log_dir, 'eval_at_target.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards, episode_lengths/n_eval_episodes))\n f.close()\n with open(os.path.join(log_dir, 'eval_at_grnd.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards_grnd, episode_lengths_grnd/n_eval_episodes))\n f.close()\n n_steps += 1\n return True\n return callback\n\ndef evaluate_policy_on_env(env,\n model,\n render=False,\n iters=50,\n deterministic=False\n ):\n return_list = []\n for i in range(iters):\n return_val = 0\n done = False\n obs = env.reset()\n while not done:\n action, _state = model.predict(obs, deterministic=deterministic)\n obs, rewards, done, info = env.step(action)\n return_val+=rewards\n if render:\n env.render()\n\n if not i%15: print('Iteration ', i, ' done.')\n return_list.append(return_val)\n print('***** STATS FOR THIS RUN *****')\n print('MEAN : ', np.mean(return_list))\n print('STD : ', np.std(return_list))\n return np.mean(return_list), np.std(return_list)/np.sqrt(len(return_list)), 0.0\n\n\nif __name__ == '__main__':\n parser = 
argparse.ArgumentParser()\n # parser.add_argument('--env', default='Hopper-v2', help=\"Name of the simulator environment (Unmodified)\")\n # parser.add_argument('--real_env', default='HopperFrictionModified-v2', help=\"Name of the Real World environment (Modified)\")\n # parser.add_argument('--rollout_policy_path', default=\"../simulation_grounding/models/TRPO_initial_policy_steps_Hopper-v2_2000000_.pkl\", help=\"relative path of initial policy trained in sim\")\n\n # parser.add_argument('--env', default='Walker2d-v2', help=\"Name of the simulator environment (Unmodified)\")\n # parser.add_argument('--real_env', default='Walker2dModified-v2', help=\"Name of the Real World environment (Modified)\")\n # parser.add_argument('--rollout_policy_path', default=\"../simulation_grounding/models/TRPO_initial_policy_steps_Walker2d-v2_2000000_.pkl\", help=\"relative path of initial policy trained in sim\")\n\n parser.add_argument('--env', default='InvertedPendulum-v2', help=\"Name of the simulator environment (Unmodified)\")\n parser.add_argument('--real_env', default='InvertedPendulumModified-v2', help=\"Name of the Real World environment (Modified)\")\n parser.add_argument('--rollout_policy_path', default=\"../simulation_grounding/models/TRPO_initial_policy_steps_InvertedPendulum-v2_1000000_.pkl\", help=\"relative path of initial policy trained in sim\")\n\n parser.add_argument('--ground_algo', help='Grounding Algorithm', default='TRPOGAIFO', type=str) # TRPO, PPO2, TRPOGAIFO, OPOLO\n parser.add_argument('--atp_policy_path', default=\"../run/test/logs/trpo-gaifo/trpogaifo/InvertedPendulum-v2/rank1/action_transformer_policy1.pkl\", type=str, help='relative path of action transformer policy')\n # parser.add_argument('--atp_policy_path', default=\"../run/test/logs/td3-opolo-idm-decay-reg/opolo/InvertedPendulum-v2/rank1/action_transformer_policy1.pkl\", type=str, help='relative path of action transformer policy')\n parser.add_argument('--seed', help='Random generator seed', type=int, default=1)\n\n parser.add_argument('--log-interval', help='Override log interval (default: -1, no change)', default=1000, type=int)\n parser.add_argument('--log-dir', help='Log directory', type=str, default='opolo-baselines/simulation_grounding/target_policies/') # required=True,\n parser.add_argument('--verbose', help='Verbose mode (0: no output, 1: INFO)', default=1,type=int)\n args = parser.parse_args()\n\n PATH_PREFIX = 'opolo-baselines'\n args.env = \"FetchReach-v1\"\n args.real_env = \"FetchReach-v1\"\n # her rollout policy and real env\n args.rollout_policy_path = PATH_PREFIX + \"/run/test.zip\"\n args.atp_policy_path = PATH_PREFIX + \"/run/test/logs/trpo-gaifo/trpogaifo/FetchReach-v1/rank1/action_transformer_policy1.pkl\"\n\n # extend log directory with experiment details\n new_log_dir = os.path.join(args.log_dir, args.env, args.ground_algo, 'rank{}'.format(args.seed))\n args.log_dir = new_log_dir\n\n ################################################\n set_global_seeds(args.seed)\n tensorboard_log = os.path.join(args.log_dir, 'tb')\n\n print(\"=\" * 10, args.env, \"=\" * 10)\n os.makedirs(args.log_dir, exist_ok=True)\n\n #####################################\n # Load model\n #####################################\n print('LOADING -PRETRAINED- INITIAL POLICY')\n with open('opolo-baselines/hyperparams/her.yml') as file:\n policy_params = yaml.load(file, Loader=yaml.FullLoader)\n\n print('Using TRPO as the Target Policy Algo')\n policy_params = policy_params[args.env]\n\n # Create grounded environment\n if 
args.ground_algo == 'TRPO':\n atp_environment = TRPO.load(load_path=args.atp_policy_path, seed=args.seed+100)\n scale_atp = False\n elif args.ground_algo == 'PPO2':\n config = {'expert_data_path': None}\n atp_environment = PPO2.load(load_path=args.atp_policy_path, seed=args.seed+100, config=config)\n scale_atp = False\n elif args.ground_algo == 'TRPOGAIFO':\n config = {'expert_data_path': None}\n atp_environment = TRPOGAIFO.load(load_path=args.atp_policy_path, seed=args.seed+100, config=config)\n scale_atp = False\n elif args.ground_algo == 'OPOLO':\n config = {'expert_data_path': None, 'shaping_mode': 'td3-opolo-idm-decay-reg'}\n atp_environment = OPOLO.load(load_path=args.atp_policy_path, seed=args.seed+100, config=config)\n scale_atp = True\n\n sim_env = gym.make(args.env)\n if 'env_wrapper' in policy_params.keys():\n if 'MujocoNormalized' in policy_params['env_wrapper']:\n sim_env = MujocoNormalized(sim_env)\n\n use_deterministic = False\n grnd_env = GroundedEnv(env=sim_env,\n action_tf_policy=atp_environment,\n debug_mode=False,\n data_collection_mode=False,\n use_deterministic=use_deterministic,\n atp_policy_noise=0.0,\n scale_atp=scale_atp,\n )\n grnd_env.seed(args.seed)\n\n ### SET CALLBACK\n kwargs = {}\n if args.log_interval > -1:\n kwargs = {'log_interval': args.log_interval}\n\n sim_eval_env = gym.make(args.env)\n if 'env_wrapper' in policy_params.keys():\n if 'MujocoNormalized' in policy_params['env_wrapper']:\n sim_eval_env = MujocoNormalized(sim_eval_env)\n\n use_deterministic = False\n grnd_eval_env = GroundedEnv(env=sim_eval_env,\n action_tf_policy=atp_environment,\n debug_mode=False,\n data_collection_mode=False,\n use_deterministic=use_deterministic,\n atp_policy_noise=0.0,\n scale_atp=scale_atp,\n )\n grnd_eval_env.seed(args.seed)\n\n real_callback_env = gym.make(args.real_env)\n real_callback_env.seed(args.seed)\n if 'env_wrapper' in policy_params.keys():\n if 'MujocoNormalized' in policy_params['env_wrapper']:\n real_callback_env = MujocoNormalized(real_callback_env)\n cb_func = eval_real_callback(log_dir=args.log_dir, eval_real_env = real_callback_env, eval_grnd_env = grnd_eval_env)\n\n ##### Load source policy\n\n # model = HER.load(\n # args.rollout_policy_path,\n # seed=args.seed,\n # env=DummyVecEnv([lambda:grnd_env]),\n # verbose=args.verbose,\n # # disabled tensorboard temporarily\n # # tensorboard_log=None,\n # tensorboard_log=tensorboard_log,\n # n_timesteps=policy_params['n_timesteps'],\n # policy=policy_params['policy'],\n # model_class=DDPG,\n # n_sampled_goal=policy_params['n_sampled_goal'],\n # goal_selection_strategy=policy_params['goal_selection_strategy'],\n # buffer_size=policy_params['buffer_size'],\n # batch_size=policy_params['batch_size'],\n # gamma=policy_params['gamma'],\n # random_exploration=policy_params['random_exploration'],\n # actor_lr=policy_params['actor_lr'],\n # critic_lr=policy_params['critic_lr'],\n # noise_typer=policy_params['noise_type'],\n # noise_std=policy_params['noise_std'],\n # normalize_observations=policy_params['normalize_observations'], \n # normalize_returns=policy_params['normalize_returns'],\n # # policy_kwargs=policy_params['policy_kwargs']ss\n\n # # timesteps_per_batch=policy_params['timesteps_per_batch'],\n # # lam=policy_params['lam'],\n # # max_kl=policy_params['max_kl'],\n # # gamma=policy_params['gamma'],\n # # vf_iters=policy_params['vf_iters'],\n # # vf_stepsize=policy_params['vf_stepsize'],\n # # entcoeff=policy_params['entcoeff'],\n # # cg_damping=policy_params['cg_damping'],\n # # 
cg_iters=policy_params['cg_iters']\n # )\n\n # above did not work\n from stable_baselines.ddpg import DDPG\n model = HER.load(args.rollout_policy_path, env=grnd_env, model_class=DDPG, seed=args.seed)\n\n ##### Learn policy\n n_timesteps = int(policy_params['n_timesteps'])\n model.learn(\n total_timesteps=n_timesteps,\n callback=cb_func,\n reset_num_timesteps=True,\n **kwargs)\n model.save(os.path.join(args.log_dir, 'target_policy.pkl'))\n\n # model = HER.load(os.path.join(args.log_dir, 'target_policy.pkl'), env=grnd_env, seed=args.seed)\n\n with open(os.path.join(args.log_dir, 'config.yml'), 'w') as f:\n yaml.dump(policy_params, f)\n\n ##### Evaluate policy in target environment\n real_env = gym.make(args.real_env)\n if 'env_wrapper' in policy_params.keys():\n if 'MujocoNormalized' in policy_params['env_wrapper']:\n real_env = MujocoNormalized(real_env)\n\n real_env.seed(args.seed)\n val = evaluate_policy_on_env(real_env,\n model,\n render=False,\n iters=50,\n deterministic=True\n )\n with open(args.log_dir + \"/output.txt\", \"a\") as txt_file:\n print(val, file=txt_file)\n\n real_env.seed(args.seed)\n val = evaluate_policy_on_env(real_env,\n model,\n render=False,\n iters=50,\n deterministic=False\n )\n with open(args.log_dir + \"/stochastic_output.txt\", \"a\") as txt_file:\n print(val, file=txt_file)","sub_path":"opolo-baselines/simulation_grounding/train_target_policy.py","file_name":"train_target_policy.py","file_ext":"py","file_size_in_byte":13324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"479414728","text":"import re\nimport numpy as np\n\nclass Justify :\n \"\"\" Author : Dimitri K. Sifoua \n Date : April 19, 2018\n This class is use to justify some text.\n Properties :\n - paragraphs = text divided into paragraphs\n - width = max length of each line\n - text_justified = array that contains each line of text justified\n \"\"\"\n \n def __init__(self, data, w) :\n self.paragraphs = self.getP(data)\n self.width = w\n self.text_justified = []\n\n def getP(self, data) :\n fo = open(\"input.txt\", \"w\")\n fo.write(data)\n fo.close()\n file = open('input.txt', 'r')\n data = file.readlines()\n paragraphs, new = [], True\n for d in data :\n if d == '' or re.match(\"^[ \\t\\n\\r\\f\\v]+$\", d) :\n new = True\n continue\n else :\n if new :\n paragraphs.append(d)\n new = False\n else :\n paragraphs[len(paragraphs) - 1] += d\n file.close()\n return paragraphs\n \n def resolve(self) :\n for paragrah in self.paragraphs :\n words = paragrah.split()\n len_words = np.zeros(len(words), dtype = np.int64)\n i = 0\n for word in words :\n if word.find('\\n') != -1 :\n word = word[0:-2]\n if word.find(' \\n') != -1 :\n word = word[0:-3]\n len_words[i] = len(word)\n i += 1\n self.handleSolution(self.textJustify(len_words), len(words), words)\n \n \n def handleSolution(self, solution, n, words) :\n line = []\n col = 0\n if solution[n] == 1 :\n k = 1\n else :\n k = self.handleSolution(solution, solution[n] - 1, words) + 1\n \n for i in range(solution[n] - 1, n) :\n line.append(words[i])\n col += len(words[i]) + 1\n \n if len(line) == 1 :\n self.text_justified.append(' '.join(line).ljust(self.width))\n else :\n q, r = divmod(self.width - col + 1, len(line) - 1)\n if r == 0 :\n self.text_justified.append((' ' * (q + 1)).join(line))\n else :\n if n == len(words) :\n self.text_justified.append((' ' * (q + 1)).join(line))\n else :\n self.text_justified.append((' ' * (q + 2)).join(line[:r] + [(' ' * (q + 1)).join(line[r:])]))\n return k\n \n \n def 
textJustify(self, len_words) :\n L = len(len_words) + 1\n over_spaces = np.zeros((L, L), dtype = np.int64)\n line_cost = np.zeros((L, L), dtype = np.float64)\n opt_cost = np.zeros(L, dtype = np.float64)\n solution = np.zeros(L, dtype = np.int64)\n \n # Over spaces of each line from i to j\n for i in range(1, L) :\n over_spaces[i][i] = self.width - len_words[i - 1]\n for j in range(i + 1, L) :\n over_spaces[i][j] = over_spaces[i][j - 1] - len_words[j - 1] - 1\n \n # Line cost of each line from i to j\n for i in range(1, L) :\n for j in range(i, L) :\n if over_spaces[i][j] < 0 :\n line_cost[i][j] = float('inf')\n elif j == L - 1 and over_spaces[i][j] >= 0 :\n line_cost[i][j] = 0\n else :\n line_cost[i][j] = over_spaces[i][j] ** 2\n \n # Optimal cost to insert words on each line from i to j\n for j in range(1, L) :\n opt_cost[j] = float('inf')\n for i in range(1, j + 1) :\n if opt_cost[i-1] != float('inf') and line_cost[i][j] != float('inf') and opt_cost[i-1] + line_cost[i][j] < opt_cost[j] :\n opt_cost[j] = opt_cost[i-1] + line_cost[i][j]\n solution[j] = i\n \n return solution","sub_path":"text_justify.py","file_name":"text_justify.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"586964781","text":"# %load q08_get_total_extras/build.py\n# Default Imports\nfrom greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv\nimport numpy as np\nimport csv\n\npath = 'data/ipl_matches_small.csv'\n\ndef get_total_extras():\n with open(path,'r') as f:\n reader=csv.reader(f, delimiter=',')\n header=next(reader)\n data=list(reader)\n arr=np.array(data)\n extras=np.hstack(arr[0:,17:18])\n arr=np.array(extras, dtype=np.int32)\n data=list(arr)\n return np.sum(data)\nget_total_extras()\n\n\n","sub_path":"q08_get_total_extras/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"218547998","text":"# github source:\r\n# https://gist.github.com/savarin/69acd246302567395f65ad6b97ee503d?fbclid=IwAR2K_aHB4eblpe2Zj2fAqDmpiaO3Izf_otudsr3KZGmeJBmyp1ugVn-cpws\r\nclass Node(object):\r\n '''\r\n Base node object.\r\n Each node stores keys and values. 
Keys are not unique to each value, and\r\n as such values are stored as a list under each key.\r\n Attributes:\r\n order (int): The maximum number of keys each node can hold.\r\n '''\r\n def __init__(self, order):\r\n self.order = order\r\n self.keys = []\r\n self.values = []\r\n self.leaf = True\r\n\r\n def add(self, key, value):\r\n '''\r\n Adds a key-value pair to the node.\r\n '''\r\n if not self.keys:\r\n self.keys.append(key)\r\n self.values.append([value])\r\n return None\r\n\r\n for i, item in enumerate(self.keys):\r\n if key == item:\r\n self.values[i].append(value)\r\n break\r\n\r\n elif key < item:\r\n self.keys = self.keys[:i] + [key] + self.keys[i:]\r\n self.values = self.values[:i] + [[value]] + self.values[i:]\r\n break\r\n\r\n elif i + 1 == len(self.keys):\r\n self.keys.append(key)\r\n self.values.append([value])\r\n break\r\n\r\n def split(self):\r\n '''\r\n Splits the node into two and stores them as child nodes.\r\n '''\r\n left = Node(self.order)\r\n right = Node(self.order)\r\n mid = int(self.order / 2)\r\n\r\n left.keys = self.keys[:mid]\r\n left.values = self.values[:mid]\r\n\r\n right.keys = self.keys[mid:]\r\n right.values = self.values[mid:]\r\n\r\n self.keys = [right.keys[0]]\r\n self.values = [left, right]\r\n self.leaf = False\r\n\r\n def is_full(self):\r\n '''\r\n Returns True if the node is full.\r\n '''\r\n return len(self.keys) == self.order\r\n\r\nclass BPlusTree(object):\r\n '''\r\n B+ tree object, consisting of nodes.\r\n Nodes will automatically be split into two once it is full. When a split\r\n occurs, a key will 'float' upwards and be inserted into the parent node to\r\n act as a pivot.\r\n Attributes:\r\n order (int): The maximum number of keys each node can hold.\r\n '''\r\n def __init__(self, order=8):\r\n self.root = Node(order)\r\n\r\n def _find(self, node, key):\r\n '''\r\n For a given node and key, returns the index where the key should be\r\n inserted and the list of values at that index.\r\n '''\r\n for i, item in enumerate(node.keys):\r\n if key < item:\r\n return node.values[i], i\r\n\r\n return node.values[i + 1], i + 1\r\n\r\n def _merge(self, parent, child, index):\r\n '''\r\n For a parent and child node, extract a pivot from the child to be\r\n inserted into the keys of the parent. Insert the values from the child\r\n into the values of the parent.\r\n '''\r\n parent.values.pop(index)\r\n pivot = child.keys[0]\r\n\r\n for i, item in enumerate(parent.keys):\r\n if pivot < item:\r\n parent.keys = parent.keys[:i] + [pivot] + parent.keys[i:]\r\n parent.values = parent.values[:i] + child.values + parent.values[i:]\r\n break\r\n\r\n elif i + 1 == len(parent.keys):\r\n parent.keys += [pivot]\r\n parent.values += child.values\r\n break\r\n\r\n def insert(self, key, value):\r\n '''\r\n Inserts a key-value pair after traversing to a leaf node. 
If the leaf\r\n node is full, split the leaf node into two.\r\n '''\r\n parent = None\r\n child = self.root\r\n\r\n while not child.leaf:\r\n parent = child\r\n child, index = self._find(child, key)\r\n\r\n child.add(key, value)\r\n\r\n if child.is_full():\r\n child.split()\r\n\r\n if parent and not parent.is_full():\r\n self._merge(parent, child, index)\r\n\r\n def retrieve(self, key):\r\n '''\r\n Returns a value for a given key, and None if the key does not exist.\r\n '''\r\n child = self.root\r\n\r\n while not child.leaf:\r\n child, index = self._find(child, key)\r\n\r\n for i, item in enumerate(child.keys):\r\n if key == item:\r\n return child.values[i]\r\n\r\n return None\r\n","sub_path":"Python Files/bplus_tree.py","file_name":"bplus_tree.py","file_ext":"py","file_size_in_byte":4392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57570687","text":"# https://www.codechef.com/LRNDSA02/problems/PSHOT\n\nfor _ in range(int(input())):\n n = int(input())\n s = input()\n\n remaining_a = remaining_b = n\n score_a = score_b = 0\n for i, ch in enumerate(s, 1):\n if i % 2 == 1:\n score_a += int(ch)\n remaining_a -= 1\n else:\n score_b += int(ch)\n remaining_b -= 1\n\n max_score_b = score_b + remaining_b\n max_score_a = score_a + remaining_a\n\n if score_a > max_score_b or score_b > max_score_a or i == 2 * n:\n print(i)\n break\n","sub_path":"src/greedy/penalty-shootout-2.py","file_name":"penalty-shootout-2.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"651986839","text":"import os\nimport cv2\n\n\n#images_file = '/home/xjyu/kgduan/yolo_v3/darknet/tools/train_list/train.txt'\n#images_file = '/home/xjyu/kgduan/yolo_v3/darknet/results/img_list_from_zpark_0511.txt'\nimages_file = '/home/xjyu/kgduan/yolo_v3/darknet/tools/train_list/train.txt'\nwith open(images_file) as f:\n images = f.readlines()\n\nprint(len(images))\n\ncount = 0\nfor i in images:\n img = cv2.imread(i.strip('\\n'))\n height, width, channel = img.shape\n #print 'height, width', height, width\n print(i)\n #if height > 1080 or width > 1920:\n #img = cv2.resize(img, (int(width / 3), int(height / 3)))\n #if height > width:\n #count += 1\n #cv2.imshow(\"img\", img)\n #cv2.waitKey(0)\n\nprint(count)\n\n","sub_path":"darknet_tools/read_img.py","file_name":"read_img.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"245101674","text":"#Runtime 166ms, Beats 53.47%\n#Basic idea is claculate the product of left elements and then product the product of right elements\nclass Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n output = [1]\n lens = len(nums)\n for i in xrange(1,lens):\n output.append(output[i-1]*nums[i-1])\n cur = 1\n for i in xrange(lens):\n output[lens-1-i] *= cur\n cur *= nums[lens-1-i]\n return output\n \n","sub_path":"Array/238.Product of Array Except Self.py","file_name":"238.Product of Array Except Self.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"304106951","text":"from django.shortcuts import render, redirect\n\nfrom products.models import Product\nfrom .models import Cart\n# Create your views here.\ndef create_cart(user = None):\n cart_obj = Cart.objects.create(user=None)\n return cart_obj\ndef 
def carts_home(request):\n # print(request.session.session_key)\n # print(dir(request.session))\n #request.session['username'] =\n #del request.session['cart_id']\n #cart_id = request.session.get('cart_id',None)\n # if cart_id is None:# and isinstance(cart_id,int):\n # #print('Create new cart')\n # #request.session['cart_id'] = request.user.username\n # cart_obj = create_cart()#Cart.objects.create(user=None)\n # request.session['cart_id'] = cart_obj.id\n # else:\n #print(('cart is created'))\n #print(cart_id)\n # OR\n # query = Cart.objects.filter(id=cart_id)\n # if query.count() ==1:\n # cart_obj = query.first()\n # if request.user.is_authenticated and cart_obj.user is None:\n # cart_obj.user = request.user\n # cart_obj.save()\n # else:\n # cart_obj = Cart.objects.new(user=request.user)\n # request.session['cart_id'] = cart_obj.id\n #OR\n cart_obj, new_obj = Cart.objects.new_or_get(request)\n # products = cart_obj.products.all()\n # total = 0\n # for product in products:\n # total += product.price\n # print(total)\n # cart_obj.total = total\n # cart_obj.save()\n context = {\n 'cart':cart_obj\n }\n return render(request,'carts/home.html',context)\n\n\ndef cart_update(request):\n id = request.POST.get('product_id')\n #print(id)\n if id is not None:\n try:\n product_obj = Product.objects.get(id=id)\n except Product.DoesNotExist:\n print('Product out of stock!')\n return redirect('carts_home')\n cart_obj,new_obj = Cart.objects.new_or_get(request)\n if product_obj in cart_obj.products.all():\n cart_obj.products.remove(product_obj)\n else:\n cart_obj.products.add(product_obj)\n\n #return redirect(product_obj.get_absolute_url())\n return redirect('carts_home')\n","sub_path":"carts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"258393138","text":"\"\"\"\nThis tool provides similar utility and output to ecat images that dcmdump from the dcmtk does for dicom images.\n\"\"\"\nfrom setuptools import find_packages, setup\n\ndependencies = ['nibabel']\n\nversion_number = '0.2.6'\n\nsetup(\n name='ecatdump',\n version=version_number,\n url='https://github.com/bendhouseart/ecatdump',\n license='BSD',\n author='Anthony Galassi',\n author_email='28850131+bendhouseart@users.noreply.github.com',\n description='This tool provides similar utility and output to ecat images \\\n that dcmdump from the dcmtk does for dicom images.',\n long_description=__doc__,\n packages=find_packages(exclude=['tests', '.git']),\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=dependencies,\n entry_points={\n 'console_scripts': [\n 'ecatdump = ecatdump.cli:main',\n ],\n },\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n # 'Development Status :: 1 - Planning',\n # 'Development Status :: 2 - Pre-Alpha',\n # 'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n # 'Development Status :: 5 - Production/Stable',\n # 'Development Status :: 6 - Mature',\n # 'Development Status :: 7 - Inactive',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX',\n 'Operating System :: MacOS',\n 'Operating System :: Unix',\n 'Operating System :: Microsoft :: Windows',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 
]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"880073","text":"from collections import defaultdict\nclass Solution:\n def sequentialDigits(self, low, high):\n def gen_digits(num):\n if num > high:\n return\n if num >= low:\n res.append(num)\n tmp = int(str(num)[1:])\n if tmp%10 < 9:\n tmp = tmp * 10 + ((tmp % 10) + 1)\n else:\n tmp = 0\n for i in range(1, len(str(num))+2):\n tmp = tmp * 10 + i\n gen_digits(tmp)\n\n tmp = str(low)\n first = int(tmp[0])\n print(\"first\", first)\n for i in range(first+1, first + len(tmp)):\n if i > 9:\n first = 0\n for j in range(1, len(str(low))+2):\n first = first * 10 + j\n break\n first = first * 10 + (i)\n\n res = []\n gen_digits(first)\n return res\n \n \n\nif __name__ == '__main__':\n sol = Solution()\n \n low = 1000\n high = 13000\n \n low = 8511\n high = 23553\n\n print(low, high)\n r = sol.sequentialDigits(low, high)\n print(r)","sub_path":"lc_1291_sequential_digits.py","file_name":"lc_1291_sequential_digits.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"387337288","text":"#coding=utf-8\n\nimport unittest\n\n\"\"\"\n\n====== from ref ===>>>>>>>\n\nlocked\n\n157. READ N CHARACTERS GIVEN READ4\n\n\nThe API: int read4(char *buf) reads 4 characters at a time from a file.\nThe return value is the actual number of characters read. For example, it returns 3 if there is only 3 characters left in the file.\nBy using the read4 API, implement the function int read(char *buf, int n) that reads n characters from the file.\n\nNote:\nThe read function will only be called once for each test case.\n\n\nHint:\nConsider which one is smaller, read4(buf) or n - num.\n\n\n\n\n\"\"\"\n\n\n# The read4 API is already defined for you.\n# @param buf, a list of characters\n# @return an integer\n# def read4(buf):\n\nclass Solution:\n # @param buf, Destination buffer (a list of characters)\n # @param n, Maximum number of characters to read (an integer)\n # @return The number of characters read (an integer)\n def read(self, buf, n):\n \"\"\"\n Complexity:\n O(n) time\n O(1) space\n :param buf: \n :param n: \n :return: \n \"\"\"\n numBytes = 0\n\n while n > 0:\n buf4 = [None] * 4\n size = read4(buf4)\n minLen = min(size, n - numBytes)\n\n if minLen == 0:\n return numBytes\n\n for i in range(minLen):\n buf[numBytes] = buf4[i]\n numBytes += 1\n\n return numBytes\n\nclass SolutionTester(unittest.TestCase):\n def setUp(self):\n self.sol = Solution()\n\n def test_case1(self):\n nums = 1\n answer = 1\n result = self.sol.searchInsert(nums)\n self.assertEqual(answer, result)\n\n\ndef main():\n suite = unittest.TestLoader().loadTestsFromTestCase(SolutionTester)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\n#-*- coding:utf-8 -*-\n\n\"\"\"\n\n157. Read N Characters Given Read4 Total Accepted: 17410 Total Submissions: 58870 Difficulty: Easy\nThe API: int read4(char *buf) reads 4 characters at a time from a file.\n\nThe return value is the actual number of characters read. 
For example, it returns 3 if there are only 3 characters left in the file.\n\nBy using the read4 API, implement the function int read(char *buf, int n) that reads n characters from the file.\n\nNote:\nThe read function will only be called once for each test case.\n\nCompany Tags: Facebook\nTags: String\nSimilar Problems: (H) Read N Characters Given Read4 II - Call multiple times\nApproach:\nAt first I did not understand the problem; only after reading several people's blogs did it become clear. First, we do not know the length of the input.\nThe parameter of the read4 API is the buffer of characters that were read, i.e. each call to read4 fills a char[4] with characters read from the given file.\nThen why does read take two parameters? char[] buf works the same way: it is the output array, not the source file. int n is given because the file may be longer than what we ask for, in which case we only need to read n characters into char[] buf; it may also be shorter, in which case we can only read the whole file and return the file length.\n/* The read4 API is defined in the parent class Reader4. \n int read4(char[] buf); */ \n \npublic class Solution extends Reader4 { \n /** \n * @param buf Destination buffer \n * @param n Maximum number of characters to read \n * @return The number of characters read \n */ \n public int read(char[] buf, int n) { \n int count = 0; \n char[] mybuf = new char[4]; \n while(count < n){ \n int size = read4(mybuf); \n int len = Math.min(n - count, size); \n if(len == 0) return count; \n for(int i = 0; i < len; i++) buf[count++] = mybuf[i]; \n } \n return count; \n } \n} \n\nFor the follow-up (read may be called multiple times), leftover characters are buffered in a queue between calls:\npublic class Solution extends Reader4 { \n Queue<Character> buff = new LinkedList<Character>(); \n public int read(char[] buf, int n) { \n int total = 0; \n while(true){ \n char[] temp = new char[4]; \n int in = read4(temp); \n for(int i = 0; i < in; i++) buff.add(temp[i]); \n \n // Decide how many more characters to write into the result, e.g. 4 were read but only 3 are needed, or 4 are needed but only 3 remain. \n in = Math.min(n - total, buff.size()); \n \n for(int i = 0; i < in; i++) buf[total++] = buff.poll(); \n if(in == 0) break; \n } \n return total; \n } \n} \n\n\n\"\"\"","sub_path":"misc/read_n_characters_given_read4.py","file_name":"read_n_characters_given_read4.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632217213","text":"#!/usr/bin/python3\nimport socket\nimport sys\nimport os\nimport getopt\nimport time\n\n(opt, arg) = getopt.getopt(sys.argv[1:], 'a:p:')\n\nfor (op, ar) in opt:\n if op == '-a':\n a = str(ar)\n elif op == '-p':\n p = int(ar)\n print('Opcion -p exitosa!')\n\ntry:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nexcept socket.error:\n print('Fallo al crear el socket!')\n sys.exit()\n\nprint('Socket Creado!')\n\nhost = a\nport = p\n\nclient.connect((host, port))\n\nprint('Socket conectado al host', host, 'en el puerto', port)\n\nwhile True:\n\n print(\"\"\"\\n\n \\t\\t\\t *** Menu ***\n - ABRIR\n - AGREGAR\n - LEER\n - CERRAR\n \"\"\")\n\n opcion = input('Opcion: ').upper()\n\n client.sendto(opcion.encode(), (host, port))\n\n if (opcion == 'ABRIR'):\n directorio = 'tmp/'\n try:\n os.stat(directorio)\n except:\n os.mkdir(directorio)\n print(client.recv(1024).decode())\n data = input()\n archivo = 'tmp/' + data + '.txt'\n client.sendto(archivo.encode(), (host, port))\n\n elif (opcion == 'AGREGAR'):\n print(client.recv(1024).decode())\n while True:\n msg = input()\n client.sendto(msg.encode(), (host, port))\n if msg == 'quit':\n break\n\n elif (opcion == 'LEER'):\n contenido = client.recv(1024).decode()\n print('\\nArchivo: ' + archivo + '\\n')\n print(contenido)\n input('Apretar Enter...')\n\n elif (opcion == 'CERRAR'):\n break\n\n else:\n print('\\nOpcion invalida!\\n')\n input('Apretar Enter...')\n\nclient.close()\n","sub_path":"ej29_cliente.py","file_name":"ej29_cliente.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"384927295","text":"# An activity selection problem\n#\n# Given activities with start and finish times, \n# select a maximum-size set of mutually compatible activities\n# Activities a and b are compatible if they do not overlap each other\n\n# Approach 1:\n# We shall solve the problem using dynamic programming\n# We decide to choose or not to choose for each activity.\n# DP would be an overkill for this problem, since the Greedy approach\n# can solve it in O(n) (a short DP sketch follows below for comparison)\n\n# Approach 2:\n# Sort the activities by their finish time\n# start picking activities with earliest finish time\n# Greedy approach = local optimal solution leads to an optimal solution\n# Greedy = \"Choose the best now, fuck about the future\"\n
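\n# Hedged sketch of Approach 1 for comparison (an illustration, not part of\n# the original solution): an O(n^2) DP over activities sorted by finish time,\n# where best[i] is the size of the largest compatible set ending with activity i.\ndef dp_activity_selector(s, f):\n\tacts = sorted(zip(s, f), key=lambda a: a[1])\n\tbest = [1] * len(acts)\n\tfor i in range(len(acts)):\n\t\tfor j in range(i):\n\t\t\tif acts[j][1] <= acts[i][0]: # activity j finishes before i starts\n\t\t\t\tbest[i] = max(best[i], best[j] + 1)\n\treturn max(best, default=0)\n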
\ndef greedy_activity_selector(s, f):\n\tn = len(s)\n\n\tA = set()\n\tA.add(0)\n\tk = 0\n\n\tfor m in range(1, n):\n\t\tif s[m] >= f[k]:\n\t\t\tA.add(m)\n\t\t\tk = m\n\treturn A\n\n\n\ns = [1, 3, 0, 5, 3, 5, 6, 8, 8, 2, 12] # start times\nf = [4, 5, 6, 7, 9, 9, 10, 11, 12, 14, 16]\t# finish times\nA = greedy_activity_selector(s, f)\nprint(A)","sub_path":"Ch16. Greedy Algorithms/1_activity_selection.py","file_name":"1_activity_selection.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"178142405","text":"\"\"\"Plot road network\n\"\"\"\nimport os\n\nimport cartopy.crs as ccrs\nimport geopandas\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\n\n\nfrom atra.utils import load_config, get_axes, plot_basemap, scale_bar, plot_basemap_labels, save_fig\n\n\ndef main(config):\n \"\"\"Read shapes, plot map\n \"\"\"\n data_path = config['paths']['data']\n\n # data\n output_file = os.path.join(config['paths']['figures'], 'network-road-map.png')\n road_edge_file_national = os.path.join(data_path, 'network', 'road_edges_national.shp')\n road_edge_file_provincial = os.path.join(data_path, 'network', 'road_edges_provincial.shp')\n\n # basemap\n proj_lat_lon = ccrs.PlateCarree()\n ax = get_axes()\n plot_basemap(ax, data_path)\n scale_bar(ax, location=(0.8, 0.05))\n plot_basemap_labels(ax, data_path, include_regions=False)\n\n colors = {\n 'National': '#ba0f03',\n 'Provincial': '#e0881f'\n }\n\n # edges\n edges_provincial = geopandas.read_file(road_edge_file_provincial)\n ax.add_geometries(\n list(edges_provincial.geometry),\n crs=proj_lat_lon,\n linewidth=1.25,\n edgecolor=colors['Provincial'],\n facecolor='none',\n zorder=4\n )\n\n edges_national = geopandas.read_file(road_edge_file_national)\n ax.add_geometries(\n list(edges_national.geometry),\n crs=proj_lat_lon,\n linewidth=1.25,\n edgecolor=colors['National'],\n facecolor='none',\n zorder=5\n )\n\n # legend\n legend_handles = [\n mpatches.Patch(color=color, label=label)\n for label, color in colors.items()\n ]\n plt.legend(handles=legend_handles, loc='lower left')\n\n # save\n save_fig(output_file)\n\n\nif __name__ == '__main__':\n CONFIG = load_config()\n main(CONFIG)\n","sub_path":"src/atra/plot/network_road.py","file_name":"network_road.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"441461998","text":"import os\nimport json\nfrom datetime import datetime\nimport logging\nimport subprocess\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import timezone\nfrom django.conf import settings\nimport pytz\nimport boto3\n\nfrom awscredentialmgr.models import AWSProfile, AWSRegion\nfrom awsresourcemgr.models import 
AWSResource\nfrom .models import Module, UpdatePlan, UpdateStep, UpdateActionLog\nfrom boto3helper.ec2 import get_instances_by_filters, \\\n get_instance_module_version\nfrom boto3helper.tags import to_dict, get_name, get_resource_name\nfrom ec2mgr.ec2 import run_instances, add_instance_tags, add_volume_tags\nfrom ec2mgr.models import EC2Instance, Connector\nfrom openfalcon2 import openfalcon_login, openfalcon_logout, openfalcon_disable\n\nlogger = logging.getLogger('common')\n\ndef JSONResponse(obj, status=200):\n return HttpResponse(json.dumps(obj), content_type=\"application/json\", status=status)\n\n\n@login_required\ndef run_module_ec2(request):\n \"\"\"\n Run EC2 instances for an UpdateStep. The number of instances to run is\n determined by (instance_count - healthy_instance_count) of that\n module in the step.\n \"\"\"\n # read request and models:\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n # record action log:\n actionlog = UpdateActionLog.create(\n request,\n update_plan = step.update_plan.first(),\n update_step = step,\n action = \"run_module_ec2\"\n )\n if step.finished:\n actionlog.set_result(False, \"Step already finished.\")\n actionlog.save()\n return JSONResponse(False)\n module = step.module\n # boto3 session:\n session = module.profile.get_session(module.region)\n ec2res = session.resource('ec2')\n # boto3 throws errors if instance_count is 0, so intercept it here:\n if module.launch_count == 0:\n actionlog.set_result(False, \"Launch number is 0.\")\n actionlog.save()\n return JSONResponse([])\n # run instances:\n try:\n instance_ids = []\n instances = run_instances(ec2res, module, module.launch_count)\n for instance in instances:\n ec2instance = EC2Instance(\n name=\"\",\n instance_id=instance.id,\n private_ip_address=instance.private_ip_address,\n key_pair=instance.key_pair.name,\n running_state=instance.state['Name'],\n service_status=\"not_ready\",\n note=\"Instance just started.\",\n instance_created=True,\n instance_tags_added=False,\n volume_tags_added=False,\n vpc_id=instance.vpc_id\n )\n ec2instance.save()\n module.instances.add(ec2instance)\n instance_ids.append(instance.id)\n except Exception as ex:\n actionlog.set_result(False, ex.message)\n actionlog.save()\n return HttpResponse(ex.message, status=500)\n actionlog.set_result(True, instance_ids)\n actionlog.save()\n # if module has no load balancers, set ELB flags in advance:\n if len(module.load_balancer_names) == 0:\n step.elb_registered = True\n step.elb_finished = True\n step.save()\n return JSONResponse(instance_ids)\n\n\n@login_required\ndef add_module_ec2_tags(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n # record action log:\n actionlog = UpdateActionLog.create(\n request,\n step.update_plan.first(),\n step,\n \"add_module_ec2_tags\"\n )\n if step.finished:\n actionlog.set_result(False, \"Step already finished.\")\n actionlog.save()\n return JSONResponse(False)\n module = step.module\n instance_ids = request.POST.getlist('instance_ids[]')\n if len(instance_ids) == 0:\n actionlog.set_result(False, \"No instances to add tags to.\")\n actionlog.save()\n return JSONResponse(\"No Instance ID.\")\n session = module.profile.get_session(module.region)\n ec2res = session.resource('ec2')\n\n try:\n result = add_instance_tags(ec2res, module, instance_ids)\n for instance_id in result:\n if result[instance_id]:\n ec2instance = EC2Instance.objects.get(instance_id=instance_id)\n ec2instance.instance_tags_added = True\n ec2instance.name = 
result[instance_id]\n ec2instance.save()\n except Exception as ex:\n actionlog.set_result(False, ex.message)\n actionlog.save()\n return HttpResponse(ex.message, status=500)\n actionlog.set_result(True, result)\n actionlog.save()\n return JSONResponse(result)\n\n\n\n@login_required\ndef add_module_volume_tags(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n # record action log:\n actionlog = UpdateActionLog.create(\n request,\n step.update_plan.first(),\n step,\n \"add_module_volume_tags\"\n )\n if step.finished:\n actionlog.set_result(False, \"Step already finished.\")\n actionlog.save()\n return JSONResponse(False)\n module = step.module\n instance_ids = request.POST.getlist('instance_ids[]')\n if len(instance_ids) == 0:\n actionlog.set_result(False, \"No instances to add tags to.\")\n actionlog.save()\n return JSONResponse(\"No Instance ID.\")\n session = module.profile.get_session(module.region)\n ec2res = session.resource('ec2')\n\n try:\n result = add_volume_tags(ec2res, instance_ids)\n for instance_id in result:\n if result[instance_id]:\n ec2instance = EC2Instance.objects.get(instance_id=instance_id)\n ec2instance.volume_tags_added = True\n ec2instance.save()\n except Exception as ex:\n actionlog.set_result(False, ex.message)\n actionlog.save()\n return HttpResponse(ex.message, status=500)\n actionlog.set_result(True, result)\n actionlog.save()\n return JSONResponse(result)\n\n\ndef stop_module_ec2_instances(ec2res, module):\n ret = {}\n for ec2instance in module.instances.all():\n instance = ec2res.Instance(ec2instance.instance_id)\n try:\n result = instance.stop()\n running_state = result['StoppingInstances'][0]['CurrentState']['Name']\n ret.update({instance.id: running_state})\n ec2instance.running_state = running_state\n ec2instance.service_status = \"down\"\n ec2instance.note = \"Instance not running\"\n ec2instance.save()\n except Exception as ex:\n ret.update({instance.id: 'error'})\n return ret\n\n\n@login_required\ndef stop_module_ec2(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n actionlog = UpdateActionLog.create(\n request,\n update_plan = step.update_plan.first(),\n update_step = step,\n action = \"stop_module_ec2\"\n )\n if step.finished:\n actionlog.set_result(False, \"Step already finished.\")\n actionlog.save()\n return JSONResponse(False)\n module = step.module\n #module = get_object_or_404(Module, pk=request.POST.get('module_id'))\n session = module.profile.get_session(module.region)\n ec2res = session.resource('ec2')\n\n #ret = stop_module_ec2_instances(ec2res, module)\n ret = module.mark_instances_for_stopping()\n actionlog.set_result(True, ret)\n actionlog.save()\n return JSONResponse(ret)\n\n\n@login_required\ndef stop_module_previous_ec2(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n current_module = step.module\n module = current_module.previous_module\n actionlog = UpdateActionLog.create(\n request,\n update_plan = step.update_plan.first(),\n update_step = step,\n action = \"stop_module_previous_ec2\"\n )\n\n # data check:\n if current_module.healthy_instance_count >= current_module.instance_count:\n step.ec2_launched = True\n step.save()\n\n if not step.ec2_launched:\n return JSONResponse((False, \"New version not launched, cannot stop old version.\"))\n if not step.elb_registered:\n return JSONResponse((False, \"New version instances not registered with ELBs, cannot stop old 
version.\"))\n if not step.elb_finished:\n return JSONResponse((False, \"Old version instances not deregistered from ELBs, cannot stop.\"))\n if step.finished:\n return JSONResponse(False)\n\n instances = module.instances.all()\n # EC2Instance database ids:\n ids = [str(instance.id) for instance in instances]\n # Script path:\n stop_script = os.path.sep.join([\n os.path.abspath(\n os.path.sep.join([\n os.path.dirname(\n os.path.abspath(__file__)\n ),\n '..'\n ])\n ),\n 'stop_ec2_instances.py'\n ])\n cmd = [\n 'python',\n stop_script,\n ]\n cmd += ids\n try:\n subprocess.Popen(cmd)\n except:\n actionlog.set_result(False, json.dumps(ids))\n actionlog.save()\n return JSONResponse(False)\n actionlog.set_result(True, json.dumps(ids))\n actionlog.save()\n return JSONResponse(True)\n\n\n@login_required\ndef reg_module_elb(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n actionlog = UpdateActionLog.create(\n request,\n update_plan = step.update_plan.first(),\n update_step = step,\n action = \"reg_module_elb\"\n )\n module = step.module\n\n # action check:\n ## previous action not finished:\n if module.healthy_instance_count < module.instance_count:\n actionlog.set_result(False, \"Instances not ready. Cannot register.\")\n actionlog.save()\n return JSONResponse(False)\n else:\n step.ec2_launched = True\n step.save()\n ## action already finished:\n if step.finished:\n actionlog.set_result(False, \"Step already finished.\")\n actionlog.save()\n return JSONResponse(False)\n\n session = module.profile.get_session(module.region)\n ec2res = session.resource('ec2')\n elbclient = session.client('elb')\n\n # Capital variables: boto3 naming style\n Instances = map(\n lambda x: {'InstanceId': x},\n #[instance.instance_id for instance in module.instances.all()]\n # edit: only register instances with services running:\n [instance.instance_id for instance in module.instances.filter(service_status='ok')]\n )\n ret = {}\n for LoadBalancerName in module.load_balancer_names.split(','):\n LoadBalancerName = LoadBalancerName.strip()\n try:\n elbclient.register_instances_with_load_balancer(\n LoadBalancerName=LoadBalancerName,\n Instances=Instances\n )\n ret.update({LoadBalancerName: True})\n except Exception as ex:\n logger.error(ex.message)\n ret.update({LoadBalancerName: False})\n step.elb_registered = True\n step.save()\n actionlog.set_result(True, ret)\n actionlog.save()\n return JSONResponse(ret)\n\n\n@login_required\ndef dereg_module_elb(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n actionlog = UpdateActionLog.create(\n request,\n update_plan = step.update_plan.first(),\n update_step = step,\n action = \"dereg_module_elb\"\n )\n # data check:\n if not step.elb_registered:\n actionlog.set_result(False, \"New instances not registered with ELBs, cannot deregister.\")\n actionlog.save()\n return JSONResponse(False)\n if step.finished:\n actionlog.set_result(False, \"Step already finished.\")\n actionlog.save()\n return JSONResponse(False)\n module = step.module.previous_module\n session = module.profile.get_session(module.region)\n ec2res = session.resource('ec2')\n elbclient = session.client('elb')\n\n Instances = map(\n lambda x: {'InstanceId': x},\n [instance.instance_id for instance in module.instances.all()]\n )\n ret = {}\n for LoadBalancerName in module.load_balancer_names.split(','):\n LoadBalancerName = LoadBalancerName.strip()\n try:\n elbclient.deregister_instances_from_load_balancer(\n LoadBalancerName=LoadBalancerName,\n 
Instances=Instances\n )\n ret.update({LoadBalancerName: True})\n except Exception as ex:\n logger.error(ex.message)\n ret.update({LoadBalancerName: False})\n # set ELB flags:\n step.elb_finished = True\n step.save()\n actionlog.set_result(True, ret)\n actionlog.save()\n return JSONResponse(ret)\n\n\n@login_required\ndef finish_step(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n actionlog = UpdateActionLog.create(\n request,\n update_plan = step.update_plan.first(),\n update_step = step,\n action = \"finish_step\"\n )\n module = step.module\n if step.finished:\n actionlog.set_result(False, \"Step already finished.\")\n actionlog.save()\n return JSONResponse((False, \"Step already finished.\"))\n result = step.check_finished()\n if not result[0]:\n actionlog.set_result(False, result[1])\n actionlog.save()\n return JSONResponse(result)\n step.set_finished()\n actionlog.set_result(True, \"\")\n actionlog.save()\n return JSONResponse((True, \"\"))\n\n\n@login_required\ndef check_module_elb_health(request):\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n if step.finished:\n return JSONResponse(False)\n module = step.module\n session = module.profile.get_session(module.region)\n ec2res = session.resource('ec2')\n elbclient = session.client('elb')\n\n Instances = list(map(lambda x:{'InstanceId':x.instance_id},module.instances.all()))\n healthy_count = 0\n for LoadBalancerName in module.load_balancer_names.split(','):\n try:\n InstanceStates = elbclient.describe_instance_health(\n LoadBalancerName=LoadBalancerName,\n Instances=Instances\n )\n InstanceStates = InstanceStates['InstanceStates']\n for InstanceState in InstanceStates:\n if InstanceState['State'] == 'InService':\n healthy_count += 1\n except Exception as ex:\n logger.error(ex.message)\n return JSONResponse(False)\n return JSONResponse([healthy_count, module.instance_count])\n\n\n@login_required\ndef disable_module_alarm(request):\n # Get instance list:\n step = get_object_or_404(UpdateStep, pk=request.POST.get('step_id'))\n module = step.module\n module = module.previous_module\n instances = module.instances.all()\n try:\n session = openfalcon_login(\n settings.OPENFALCON['login_url'],\n settings.OPENFALCON['username'],\n settings.OPENFALCON['password'],\n settings.OPENFALCON['cert_file'],\n settings.OPENFALCON['cert_key'],\n False\n )\n except:\n return JSONResponse({'message': 'openfalcon login failed'}, status=500)\n result = openfalcon_disable(\n session,\n settings.OPENFALCON['switch_url'],\n instances\n )\n if not result:\n return JSONResponse({'message': 'openfalcon disable failed'}, status=500)\n openfalcon_logout(\n session,\n settings.OPENFALCON['logout_url']\n )\n return JSONResponse({'result': 'OK'})\n\n\n# kick devices views:\n@login_required\ndef deregister_connectors(request):\n step_id = request.POST.get('step_id')\n step = UpdateStep.objects.get(pk=step_id)\n elb_names = request.POST.getlist('elb_names[]')\n ids = request.POST.getlist('ids[]')\n instances = EC2Instance.objects.filter(id__in=ids)\n param_instance_ids = [{'InstanceId': i.instance_id} for i in instances]\n\n session = step.module.profile.get_session(step.module.region)\n elb = session.client('elb')\n\n for elb_name in elb_names:\n elb.deregister_instances_from_load_balancer(\n LoadBalancerName=elb_name,\n Instances=param_instance_ids\n )\n\n return JSONResponse([step_id, elb_names, ids])\n\n\n\n\n@login_required\ndef get_connector_device_numbers(request):\n ids = request.GET.getlist('ids[]')\n 
instances = EC2Instance.objects.filter(id__in=ids)\n connectors = list()\n for instance in instances:\n connectors.append(Connector(instance, instance.modules.first().name))\n for connector in connectors:\n connector.get_online_device_number()\n ret = [c.to_dict() for c in connectors]\n return JSONResponse(ret)\n\n\n@login_required\ndef init_connector_close_all(request):\n ids = request.POST.getlist('ids[]')\n instances = EC2Instance.objects.filter(id__in=ids)\n connectors = list()\n for instance in instances:\n connectors.append(Connector(instance, instance.modules.first().name))\n for connector in connectors:\n connector.get_online_device_number()\n for connector in connectors:\n connector.close_all_connections()\n ret = [c.to_dict() for c in connectors]\n return JSONResponse(ret)\n\n\n@login_required\ndef dereg_connectors_from_lbs(request):\n ids = request.POST.getlist('ids[]')\n instances = EC2Instance.objects.filter(id__in=ids)\n\n","sub_path":"PrdDeployer/updateplanmgr/ajax_views.py","file_name":"ajax_views.py","file_ext":"py","file_size_in_byte":17530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"635256482","text":"from fastapi import FastAPI\nimport uvicorn\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom core.config import setting\nfrom api.api import api_router\n\n#declare app\napp = FastAPI()\n#allow access on all routes\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\napp.include_router(api_router)\n\nif __name__==\"__main__\":\n uvicorn.run(\"main:app\",host = setting.HOST, port=setting.PORT)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"165409248","text":"import re\nfrom enum import IntEnum, unique\n\nfrom src.util import Util\n\n\n@unique\nclass Token(IntEnum):\n IDENTIFIER = 0\n CONSTANT = 1\n\n ARRAY = 3\n INT = 4\n CHAR = 5\n FLOAT = 6\n\n WHILE = 10\n DO = 11\n BEGIN = 12\n END = 13\n IF = 14\n THEN = 15\n ELSE = 16\n\n INPUT = 20\n PRINT = 21\n\n NEWLINE = 25\n LEFT_SQB = 26 # [\n RIGHT_SQB = 27 # ]\n LEFT_PAR = 28 # (\n RIGHT_PAR = 29 # )\n LESS = 30 # <\n GREATER = 31 # >\n LESS_EQUAL = 32 # <=\n GREATER_EQUAL = 33 # >=\n EQ_EQUAL = 34 # ==\n NOT_EQUAL = 35 # !=\n STAR = 36 # *\n SLASH = 37 # /\n PLUS = 38 # +\n MINUS = 39 # -\n EQUAL = 40 # =\n COMMA = 41 # ,\n\n\nclass TokenStrings:\n KEYWORDS = {'int', 'float', 'char', 'array', 'while', 'do', 'begin', 'end', 'if', 'then', 'else', 'input', 'print'}\n OTHER_TOKENS = {'[', ']', '(', ')', '<', '>', '<=', '>=', '==', '!=', '*', '/', '+', '=', ','}\n\n _TOKENS_STRING_MAP = {\n 'array': Token.ARRAY,\n 'int': Token.INT,\n 'char': Token.CHAR,\n 'float': Token.FLOAT,\n\n 'while': Token.WHILE,\n 'do': Token.DO,\n 'begin': Token.BEGIN,\n 'end': Token.END,\n 'if': Token.IF,\n 'then': Token.THEN,\n 'else': Token.ELSE,\n\n 'input': Token.INPUT,\n 'print': Token.PRINT,\n\n '\\n': Token.NEWLINE,\n '[': Token.LEFT_SQB,\n ']': Token.RIGHT_SQB,\n '(': Token.LEFT_PAR,\n ')': Token.RIGHT_PAR,\n '<': Token.LESS,\n '>': Token.GREATER,\n '<=': Token.LESS_EQUAL,\n '>=': Token.GREATER_EQUAL,\n '==': Token.EQ_EQUAL,\n '!=': Token.NOT_EQUAL,\n '*': Token.STAR,\n '/': Token.SLASH,\n '+': Token.PLUS,\n '-': Token.MINUS,\n '=': Token.EQUAL,\n ',': Token.COMMA\n }\n\n @staticmethod\n def get_token_for_string(string):\n \"\"\"\n 
Get the mapping from a string literal to a token\n :param string:\n :return: the Token type or None if the string is not found\n \"\"\"\n return TokenStrings._TOKENS_STRING_MAP.get(string)\n\n @staticmethod\n def is_valid_identifier(string):\n return string and string not in TokenStrings.KEYWORDS \\\n and string.isalnum() and not string[0].isdigit()\n\n @staticmethod\n def is_valid_number_constant(string):\n if not string:\n return\n\n converted = Util.convert_to_int(string)\n if converted is None:\n converted = Util.convert_to_float(string)\n\n return converted is not None\n\n\n\n","sub_path":"compilers/labs/gabi-university/Compilers/src/step2/tokens.py","file_name":"tokens.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648994177","text":"import string\n\nfrom es import ElasticSearch\nfrom nlp.cleaner import Cleaner\nfrom nlp.translate import Translate\n\nDEFAULT = [\n \"Sorry, I don't get what you're saying, can you try again?\",\n \"I don't quite follow, could you try repeating that in a different way?\",\n \"I'm not sure if I remember that happening in the Fast & Furious movies...\",\n \"Are you describing a specific scene? Which characters were involved?\",\n \"You're interesting...Out of curiosity, what is your favourite Fast & Furious movie?\",\n \"I don't understand, but I'm interested, what do you think Dom would say if you said that to him?\",\n \"Well...I'm not sure how to respond. Let's change the topic - What do you think about Tyrese?\",\n \"I don't know how to follow that up haha. To change the topic - What do you think about Dom?\",\n \"Intriguing, but let's get back on topic - What do you think about Paul Walker?\"\n]\n\nENDING = [\n \", but why are we talking about this instead of the Fast and Furious movie franchise?\",\n \". I'm not sure how to respond to that. Let's change the topic - What do you think about Tyrese?\",\n \". 
Intriguing, but let's get back on topic - What is your favourite Fast and Furious movie?\",\n \", I don't quite follow, could you try repeating that in a different way?\",\n]\n\n\nclass Bot:\n \"\"\"\n The main bot class\n \"\"\"\n\n def __init__(self):\n self.cleaner = Cleaner\n self.es = ElasticSearch\n self.translate = Translate\n\n def ask(self, raw_input_string):\n \"\"\"\n :param raw_input_string: Users question as raw string\n :return: Bots response as string\n \"\"\"\n translated_input_string, lang = self.translate.translate_to_english(raw_input_string)\n query = self.cleaner.clean(translated_input_string)\n results = self.es.search(query)\n\n if len(results) > 0:\n response = results[0][\"_source\"]['response']\n response, src_lang = self.translate.translate_to_lang(response, lang)\n return response\n\n return self._get_default_response(query, lang)\n\n def _get_default_response(self, cleaned_query: string, lang: string):\n from random import randint\n response = self._get_wikipedia_summary(cleaned_query)\n\n if response is None:\n response = DEFAULT[randint(0, len(DEFAULT) - 1)]\n else:\n response += ENDING[randint(0, len(ENDING) - 1)]\n\n return self.translate.translate_to_lang(response, lang)[0]\n\n @staticmethod\n def _get_wikipedia_summary(query):\n from nlp.tokenizer import Tokenizer\n from nlp.pos_tagger import POSTagger\n\n tokenized = Tokenizer.tokenize(query)\n tagged_tokens = POSTagger.tag(tokenized)\n page_title = next((noun[0] for noun in tagged_tokens if noun[1] == 'NN'), None)\n\n if page_title is None:\n return None\n\n from .wikipedia import Wikipedia\n wikipedia = Wikipedia()\n return wikipedia.get_page_summary_intro(page_title)\n","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"310591790","text":"\r\nimport torch.nn as nn\r\nfrom torch.nn import functional as F\r\n\r\n\r\nclass RegLoss(nn.Module):\r\n def __init__(self, num_class, momentum=0.9):\r\n super(RegLoss,self).__init__()\r\n self.dict = dict(zip(range(num_class),[None]*num_class))\r\n self.momentum = momentum\r\n \r\n def forward(self, outputs, lbls):\r\n res = []\r\n for lbl in lbls.unique():\r\n vec = outputs[lbls==lbl]\r\n mean = vec.mean(dim=0)\r\n if self.dict[lbl.item()] is None:\r\n self.dict[lbl.item()] = mean\r\n else:\r\n self.dict[lbl.item()] = self.dict[lbl.item()] *(1-self.momentum)\\\r\n + mean *self.momentum\r\n \r\n res.append(((vec-mean)**2).sum()/vec.shape[0])\r\n return sum(res)/lbls.unique().shape[0]\r\n \r\n\r\nclass Loss(nn.CrossEntropyLoss):\r\n def __init__(self, num_class, reg_loss = True, weight=None, \r\n size_average=None, ignore_index=-100,\r\n reduce=None, reduction='mean'):\r\n super(Loss, self).__init__(weight=weight, size_average=size_average, ignore_index=ignore_index,\r\n reduce=reduce, reduction=reduction)\r\n self.reg_loss = RegLoss(num_class) if reg_loss else None\r\n\r\n\r\n def forward(self, preds, lbl):\r\n cls_loss = super(Loss, self).forward(preds, lbl)\r\n if self.reg_loss is not None:\r\n reg_loss = self.reg_loss(preds, lbl)\r\n return {\"total\":cls_loss+0.4*reg_loss, \"cls_loss\":cls_loss,\"reg_loss\":reg_loss}\r\n else:\r\n return {\"total\":cls_loss, \"cls_loss\":cls_loss,}\r\n\r\n\r\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"235716155","text":"import tempfile\n\nfrom 
django.core.management import call_command\nfrom django.test import TestCase\nfrom django.core.management.base import CommandError\n\nfrom ..models import Category, Channel\n\n\nclass ImportCategoriesTestCase(TestCase):\n\n def setUp(self):\n self.csv_file = tempfile.NamedTemporaryFile(mode='w')\n self.csv_file_name = self.csv_file.name\n\n def test_import_categories_command_exists(self):\n try:\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n except CommandError:\n self.fail('importcategories command does not exists')\n\n def test_it_should_create_new_channel_if_it_does_not_exists(self):\n new_channel_name = 'walmart'\n\n call_command(\n 'importcategories', new_channel_name, self.csv_file_name)\n\n self.assertTrue(\n Channel.objects.filter(name=new_channel_name).exists())\n\n def test_do_not_create_new_channel_if_csv_file_does_not_exists(self):\n new_channel_name = 'walmart'\n try:\n call_command(\n 'importcategories', new_channel_name, 'idonotexit.csv')\n except:\n pass\n\n self.assertFalse(\n Channel.objects.filter(name=new_channel_name).exists())\n\n def test_need_required_arguments(self):\n with self.assertRaises(CommandError) as error:\n call_command('importcategories')\n self.assertIn('channel', error.exception.args[0])\n self.assertIn('csv_file', error.exception.args[0])\n\n def test_must_pass_if_csv_file_exists(self):\n try:\n call_command(\n 'importcategories', 'channel', self.csv_file_name)\n except:\n self.fail('should not raise error if CSV file exists')\n\n def test_must_fail_if_csv_file_doesnot_exists(self):\n with self.assertRaises(CommandError):\n call_command(\n 'importcategories', 'channel', 'idonotexist.csv')\n\n def test_must_import_category_ignoring_header_line(self):\n self.csv_file.writelines([\n 'Category\\n'\n 'Books\\n'\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 1)\n self.assertEqual(Category.objects.all()[0].name, 'Books')\n\n def test_must_create_all_categories(self):\n self.csv_file.writelines([\n 'Category\\n',\n 'Books\\n',\n 'Books / National Literature\\n',\n 'Books / National Literature / Science Fiction\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 3)\n\n self.assertTrue(Category.objects.filter(name='Books').exists())\n self.assertTrue(Category.objects.filter(name='National Literature').exists())\n self.assertTrue(Category.objects.filter(name='Science Fiction').exists())\n\n def test_must_create_category_tree(self):\n self.csv_file.writelines([\n 'Category\\n',\n 'Books\\n',\n 'Books / National Literature\\n',\n 'Books / National Literature / Science Fiction\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n level0 = Category.objects.get(name='Books')\n level1 = Category.objects.get(name='National Literature')\n level2 = Category.objects.get(name='Science Fiction')\n\n self.assertEqual(level0.parent, None)\n self.assertEqual(level1.parent, level0)\n self.assertEqual(level2.parent, level1)\n\n def test_category_must_be_unique_in_its_level(self):\n self.csv_file.writelines([\n 'Category\\n',\n 'Games\\n',\n 'Games / XBOX 360\\n',\n 'Games / XBOX 360 / Games\\n',\n 'Games / XBOX 360 / Games\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 3)\n 
self.assertEqual(Category.objects.filter(name='Games').count(), 2)\n\n def test_multiples_levels_of_hierarchy(self):\n self.csv_file.writelines([\n 'Category\\n',\n 'Books\\n',\n 'Books / National Literature\\n',\n 'Books / National Literature / Science Fiction\\n',\n 'Books / National Literature / Fiction Fantastic\\n',\n 'Games\\n',\n 'Games / XBOX 360\\n',\n 'Games / XBOX 360 / Games\\n',\n 'Games / XBOX 360 / Games\\n',\n 'Games / XBOX 360 / Games / RPG\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 8)\n\n def test_categories_may_have_any_number_of_levels(self):\n self.csv_file.writelines([\n 'Category\\n',\n 'Games\\n',\n 'Games / PC\\n',\n 'Games / PC / Games / RPG\\n',\n 'Games / PC / Games / RPG / Medieval / English Language / Open Source\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 7)\n\n def test_must_clear_existing_categories_from_channel(self):\n channel = Channel.objects.create(name='channel_name')\n Category.objects.create(name='Old Category', channel=channel)\n\n self.csv_file.writelines([\n 'Category\\n',\n 'Games\\n',\n 'Games / PC\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 2)\n self.assertFalse(Category.objects.filter(name='Old Category').exists())\n self.assertTrue(Category.objects.filter(name='Games').exists())\n self.assertTrue(Category.objects.filter(name='PC').exists())\n\n def test_import_correctly_even_if_categories_are_not_sorted(self):\n self.csv_file.writelines([\n 'Category\\n',\n 'Games\\n',\n 'Books / National Literature\\n',\n 'Books\\n',\n 'Games / PC\\n',\n 'Books / National Literature / Science Fiction\\n',\n 'Games / PC / Games / RPG / Medieval / English Language / Open Source\\n',\n 'Books / National Literature / Fiction Fantastic\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', 'channel_name', self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 11)\n\n def test_must_not_affect_categories_of_different_channels(self):\n channel0 = Channel.objects.create(name='channel_name_0')\n Category.objects.create(name='Games', channel=channel0)\n\n channel1 = Channel.objects.create(name='channel_name_1')\n Category.objects.create(name='Games', channel=channel1)\n\n self.csv_file.writelines([\n 'Category\\n',\n 'No Games\\n',\n ])\n self.csv_file.seek(0)\n\n call_command(\n 'importcategories', channel1.name, self.csv_file_name)\n\n self.assertEqual(Category.objects.count(), 2)\n self.assertTrue(Category.objects.filter(name='Games', channel=channel0).exists())\n self.assertFalse(Category.objects.filter(name='Games', channel=channel1).exists())\n self.assertTrue(Category.objects.filter(name='No Games', channel=channel1).exists())\n","sub_path":"work-at-olist/categories/tests/test_importcategories.py","file_name":"test_importcategories.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205928584","text":"filme1 = 'O Magico de Oz'\nfilme2 = 'Seguranca no Museu'\nfilme3 = 'O Senhor dos Aneis'\n\nfilmes = [filme1, filme2, filme3]\n\ncarro1 = 'Mazda'\ncarro2 = 'Lamborguini Murcielago'\ncarro3 = 'Ferrari'\ncarro4 = 'Porche'\ncarro5 = 'BMW'\n\ncarros = [carro1, carro2, carro3, carro4, carro5]\n\ncidade1 = 'Sao 
Paulo'\ncidade2 = 'Rio de Janeiro'\ncidade3 = 'Ceara'\ncidade4 = 'San Francisco'\ncidade5 = 'Los Angeles'\ncidade6 = 'Michigan'\ncidade7 = 'Ohio'\ncidade8 = 'Cairo'\ncidade9 = 'Tokio'\ncidade10 = 'Dubai'\n\ncidades = [cidade1, cidade2, cidade3, cidade4, cidade5,\n cidade6, cidade7, cidade8, cidade9, cidade10]\n\ndic = {'Carro': 'Veiculo que e usado para a locomocao',\n 'casa': 'local de habitacao',\n 'Cidade': 'Lugar estabelecido no mapa'\n }\n\n\ndef imprime_conteudo_de_listas(items_que_quero_imprimir):\n print(\n '##########################################')\n for cada_indice_da_lista in items_que_quero_imprimir:\n print(cada_indice_da_lista)\n\n print('##########################################')\n\n\nimprime_conteudo_de_listas(filmes)\nimprime_conteudo_de_listas(carros)\nimprime_conteudo_de_listas(cidades)\nimprime_conteudo_de_listas(dic)\n","sub_path":"lista1.py","file_name":"lista1.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"524721577","text":"\n\"\"\"\ntrainMLP.py\n\nAuthor: Varun Dhingra\nAuthor: Pragash Vijayaragavan\nAuthor: Siddharth Subramanian\n\nA multi-layer perceptron.\n\"\"\"\nimport math\nimport random\nimport sys\nfrom _csv import reader\n\nimport matplotlib.pylab as plt\n\n\nclass Neuron:\n\n __slots__ = \"weights\", \"output\", \"delta\"\n\n def __init__(self, weights):\n self.weights = weights\n self.output = 0.0\n self.delta = 0.0\n\n def __str__(self):\n str1 = ' '\n for i in range(len(self.weights)):\n str1 = \" \" + str(self.weights[i]) + \" \"\n str2 = \" Delta: \" + str(self.delta)\n str3 = \" Output: \" + str(self.output)\n return \"Weights are \"+str1+str2+str3\n\n\nclass NeuralNet:\n\n __slots__ = \"network\",\"alpha\", \"nIn\", \"nHid\", \"nOut\"\n\n def __init__(self,alpha,nIn, nHid, nOut):\n self.alpha = alpha\n self.nIn = nIn\n self.nHid = nHid\n self.nOut = nOut\n self.network = self.initNet(nIn,nHid,nOut)\n\n def initNet(self,nIn, nHid, nOut):\n network = []\n wlist = []\n hLayer = []\n oLayer = []\n for i in range(nHid):\n n = Neuron\n wlist = []\n for j in range(nIn+1):\n wt = random.uniform(-1,1)\n wlist.append(wt)\n n = Neuron(wlist)\n hLayer.append(n)\n network.append(hLayer)\n\n for i in range(nOut):\n n = Neuron\n wlist = []\n for j in range(nHid+1):\n wt = random.uniform(-1,1)\n wlist.append(wt)\n n = Neuron(wlist)\n oLayer.append(n)\n network.append(oLayer)\n return network\n\n def plotter(self,x,y):\n plt.title(\"Epoch vs SSE\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Sum of Squared Error\")\n plt.plot(x,y)\n plt.show()\n\n def fileWrite(self,epochnum):\n filename = \"./trained-\"+str(epochnum)+\".csv\"\n # filename = \"./trained-10000.csv\"\n with open(filename, 'a') as fobj:\n for layer in self.network:\n for neuron in layer:\n str1 = ''\n # print(neuron.weights)\n for w in neuron.weights:\n str1 = str1 + \",\" + str(w)\n str1 = str1[1:]\n str1 = str1 + '\\n'\n fobj.write(str1)\n\n def predict(self,row):\n outputs = self.fwdPropogate(row)\n return outputs.index(max(outputs))+1\n\n def recogRate(self,actual, predicted, epoch):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predicted[i]:\n correct += 1\n print(\"Accuracy at \",epoch )\n print(correct / float(len(actual)) * 100.0)\n\n def iterator(self, dataset):\n print(\"Start Training\")\n sse = []\n epochnum = []\n epochlist = [0, 10, 100, 1000, 10000]\n for i in range(10000):\n epochnum.append(i)\n ssError = self.trainer(dataset)\n sse.append(ssError)\n if i+1 in 
epochlist:\n self.plotter(epochnum, sse)\n self.fileWrite(i+1)\n # predictions = []\n # actual = []\n # for row in dataset:\n # prediction = self.predict(row)\n # actual.append(row[-1])\n # predictions.append(prediction)\n # self.recogRate(actual,predictions, i+1)\n # print(predictions)\n\n # Train Network-------------------------------------------\n def trainer(self, train):\n outputs = []\n sumError = 0\n for row in train:\n outputs = self.fwdPropogate(row)\n expected = [0 for i in range(self.nOut)]\n expected[(row[-1]-1)] = 1\n self.backproErr(expected)\n self.updateWts(row)\n sumError += sum([(expected[i]-outputs[i])**2 for i in range(len(expected))])\n return sumError\n\n # Forward Propogation------------------------------------\n\n def active(self,weights, inputs):\n activation = weights[-1]\n for i in range(len(inputs)-1):\n activation += weights[i] * inputs[i]\n return activation\n\n def sigmoid(self,output):\n return 1.0 / ( 1+ math.exp(-output) )\n\n def fwdPropogate(self, row):\n outputs = []\n for i in range(len(self.network)):\n layer = self.network[i]\n outputs = []\n if i == 0:\n for neuron in layer:\n act = self.active(neuron.weights, row)\n neuron.output = self.sigmoid(act)\n outputs.append(neuron.output)\n else:\n inputs = []\n for neuron in self.network[i-1]:\n inputs.append(neuron.output)\n for neuron in layer:\n act = self.active(neuron.weights, inputs)\n neuron.output = self.sigmoid(act)\n outputs.append(neuron.output)\n return outputs\n\n # Backward Propogation-----------------------------------\n def transferDer(self,output):\n return output * (1.0 - output)\n\n def backproErr(self, expected):\n for i in reversed(range(len(self.network))):\n layer = self.network[i]\n errors = list()\n if i != len(self.network)-1:\n for j in range(len(layer)):\n error = 0.0\n for neuron in self.network[i + 1]:\n error += (neuron.weights[j] * neuron.delta)\n # neuron.delta = error * self.transferDer(neuron.output)\n errors.append(error)\n else:\n for j in range(len(layer)):\n neuron = layer[j]\n error = expected[j] - neuron.output\n errors.append(error)\n # neuron.delta = error * self.transferDer(neuron.output)\n for j in range(len(layer)):\n neuron = layer[j]\n neuron.delta = errors[j] * self.transferDer(neuron.output)\n\n def updateWts(self, row):\n for i in range(len(self.network)):\n inputs = row\n if i != 0:\n inputs = [neuron.output for neuron in self.network[i - 1]]\n\n for neuron in self.network[i]:\n for j in range(len(inputs)-1):\n neuron.weights[j] += self.alpha * neuron.delta * inputs[j]\n neuron.weights[-1] += self.alpha * neuron.delta\n\n\ndef loadcleaner(filename):\n dataset = []\n with open(filename) as fobj:\n csvReader = reader(fobj)\n for row in csvReader:\n if not row:\n continue\n dataset.append(row)\n\n for row in dataset:\n c = int (row[-1])\n row[-1] = c\n for i in range(len(row)-1):\n val = float(row[i])\n row[i] = val\n return dataset\n\n\ndef main():\n\n filename = sys.argv[1]\n trainset = loadcleaner(filename)\n print(trainset)\n nIn = len(trainset[0]) - 1\n nOut = len(set([row[-1] for row in trainset]))\n epochlist = [0,10,100,1000,10000]\n alpha = 0.1\n\n net = NeuralNet(alpha,nIn,5,nOut)\n\n net.iterator(trainset)\n\nif __name__ == '__main__':\n main()\n","sub_path":"IntelligentSystemsAssignments/ISproj2/trainMLP.py","file_name":"trainMLP.py","file_ext":"py","file_size_in_byte":7286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"547346655","text":"import tensorflow as tf\nfrom utils import 
variable_summaries\n\n\nclass MF(object):\n\n def __init__(self, hyperparams, train_config):\n\n self.train_config = train_config\n\n # create placeholder\n self.u = tf.placeholder(tf.int32, [None]) # [B]\n self.i = tf.placeholder(tf.int32, [None]) # [B]\n self.y = tf.placeholder(tf.float32, [None]) # [B]\n self.w = tf.placeholder(tf.float32, [None]) # [B]\n self.lr = tf.placeholder(tf.float32, [], name='learning_rate')\n\n # -- create embed begin ----\n user_emb_w = tf.get_variable(\"user_emb_w\", [hyperparams['num_users'], hyperparams['user_embed_dim']])\n item_emb_w = tf.get_variable(\"item_emb_w\", [hyperparams['num_items'], hyperparams['item_embed_dim']])\n user_b = tf.get_variable(\"user_b\", [hyperparams['num_users']], initializer=tf.constant_initializer(0.0))\n item_b = tf.get_variable(\"item_b\", [hyperparams['num_items']], initializer=tf.constant_initializer(0.0))\n # -- create embed end ----\n\n # -- embed begin -------\n u_emb = tf.nn.embedding_lookup(user_emb_w, self.u)\n i_emb = tf.nn.embedding_lookup(item_emb_w, self.i)\n u_b = tf.gather(user_b, self.u) # [B]\n i_b = tf.gather(item_b, self.i) # [B]\n # -- embed end -------\n\n interaction = tf.reduce_sum(u_emb * i_emb, axis=-1) # [B]\n self.logits = interaction + u_b + i_b # [B]\n self.scores = tf.nn.sigmoid(self.logits) # scores is logits into sigmoid, for inference\n\n variable_summaries(self.logits, 'logits')\n variable_summaries(self.scores, 'scores')\n\n # return same dimension as input tensors, let x = logits, z = labels, z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n self.losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.y)\n variable_summaries(self.losses, 'loss')\n\n self.loss = tf.reduce_mean(self.losses * self.w) # for training loss\n\n # global update step variable\n self.global_step = tf.Variable(0, trainable=False, name='global_step')\n\n # optimizer\n if train_config['optimizer'] == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n elif train_config['optimizer'] == 'rmsprop':\n optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr)\n else:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)\n\n # compute gradients and different update step\n trainable_params = tf.trainable_variables()\n grads = tf.gradients(self.loss, trainable_params) # return a list of gradients (A list of `sum(dy/dx)` for each x in `xs`)\n clip_grads, _ = tf.clip_by_global_norm(grads, 5)\n clip_grads_tuples = zip(clip_grads, trainable_params)\n self.train_op = optimizer.apply_gradients(clip_grads_tuples, global_step=self.global_step)\n\n self.merged = tf.summary.merge_all()\n\n def train(self, sess, batch):\n loss, _ = sess.run([self.loss, self.train_op], feed_dict={\n self.u: batch[0],\n self.i: batch[1],\n self.y: batch[2],\n self.w: batch[3],\n self.lr: self.train_config['lr']\n })\n return loss\n\n def inference(self, sess, batch):\n scores = sess.run(self.scores, feed_dict={\n self.u: batch[0],\n self.i: batch[1]\n })\n return scores\n\n def compute_logits(self, sess, batch):\n logits = sess.run(self.logits, feed_dict={\n self.u: batch[0],\n self.i: batch[1]\n })\n return logits\n\n def create_batch_summary(self, sess, batch):\n batch_summary = sess.run(self.merged, feed_dict={\n self.u: batch[0],\n self.i: batch[1],\n self.y: batch[2],\n self.w: batch[3]\n })\n return 
batch_summary\n","sub_path":"SRIU/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"526329906","text":"from django.http import HttpResponse\nfrom django.db.models import Q\nfrom rest_framework.response import Response\nfrom flight.models import Flight\nfrom flight_route.models import JourneyInfo\nfrom .serializers import DataSerializer\n\nfrom rest_framework.views import APIView\n\n\nclass GetSearchView(APIView):\n \"\"\"\n\n \"\"\"\n def get(self, request):\n \"\"\"\n ---\n parameters:\n - name: msn\n required: false\n paramType: query\n - name: flight_number\n \"\"\"\n flight_details = []\n try:\n msn = request.GET.get(\"msn\")\n flight_number = request.GET.get(\"flight_number\")\n\n if not msn and not flight_number:\n return Response({\"success\": False, \"message\": \"Give either msn or flight number\"})\n if not flight_number:\n flight_details = JourneyInfo.objects.filter(flight__msn=request.GET.get(\"msn\"))\n elif not msn:\n flight_details = JourneyInfo.objects.filter(flight_route__flight_number=flight_number)\n else:\n flight_details = JourneyInfo.objects.filter(flight__msn=request.GET.get(\"msn\"), flight_route__flight_number=flight_number)\n serializer = DataSerializer(flight_details, many=True)\n return Response({\"success\": True, \"data\": serializer.data})\n\n except Exception as e:\n return Response({\"success\": False, \"message\": \"Something Went Wrong \" + str(e)})\n\n\nclass GetFilterView(APIView):\n \"\"\"\n\n \"\"\"\n def get(self, request):\n \"\"\"\n ---\n parameters:\n - name: source_destination\n required: false\n paramType: query\n - name: end_destination\n - name: start_date\n - name: end_date\n\n \"\"\"\n flight_details = []\n try:\n source_destination = request.GET.get(\"source_destination\")\n end_destination = request.GET.get(\"end_destination\")\n start_date = request.GET.get('start_date')\n end_date = request.GET.get('end_date')\n flight_details = JourneyInfo.objects.filter(Q(flight_route__start_location=source_destination)| \n Q(flight_route__end_location=end_destination)| \n Q(flight_route__start_time__date=start_date)|\n Q(flight_route__end_time__date=end_date)|\n Q(flight__msn=request.GET.get('msn'))|\n Q(flight_route__flight_number=request.GET.get(\"flight_number\"))|\n Q(flight__flight_aircraft_id=request.GET.get('flight_id')))\n\n \n serializer = DataSerializer(flight_details, many=True)\n return Response({\"success\": True, \"data\": serializer.data})\n except Exception as e:\n return Response({\"success\": False, \"message\": \"Something Went Wrong \" + str(e)})\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"411490420","text":"import os\nimport tensorflow as tf\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# NOTE: use tensorflow version 1.14!\n\n# Since our dataset has strings in the Y column (the classes), we encode each class string as a one-hot vector\ndef label_encode(label):\n val = []\n if label == \"Iris-setosa\":\n val = [1, 0, 0]\n elif label == \"Iris-versicolor\":\n val = [0, 1, 0]\n elif label == \"Iris-virginica\":\n val = [0, 0, 1]\n return val\n\n\ndef data_encode(file):\n X = []\n Y = []\n train_file = open(file, 'r')\n for line in train_file.read().strip().split('\\n'):\n line = line.split(',')\n X.append([line[0], line[1], line[2], line[3]])\n Y.append(label_encode(line[4]))\n return X, Y\n
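\n# Illustrative note (an addition): with the standard iris CSV layout assumed\n# above, a row '5.1,3.5,1.4,0.2,Iris-setosa' becomes the X entry\n# ['5.1', '3.5', '1.4', '0.2'] and the Y entry [1, 0, 0].\n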
Y.append(label_encode(line[4]))\n return X, Y\n\n\n# parametros da rede\nlearning_rate = 0.01\ntraining_epochs = 5000\ndisplay_steps = 100\n\nn_input = 4 # quantos valores de entrada?\nn_hidden = 10 # quantos neurônios na camada oculta?\nn_output = 3 # quantos neuronios na camada de saida?\n\n# a partir daqui construimos o modelo\nX = tf.placeholder(\"float\", [None, n_input])\nY = tf.placeholder(\"float\", [None, n_output])\n\nweights = {\n \"hidden\": tf.Variable(tf.random_normal([n_input, n_hidden])),\n \"output\": tf.Variable(tf.random_normal([n_hidden, n_output])),\n}\n\nbias = {\n \"hidden\": tf.Variable(tf.random_normal([n_hidden])),\n \"output\": tf.Variable(tf.random_normal([n_output])),\n}\n\n\ndef model(X, weights, bias):\n layer1 = tf.add(tf.matmul(X, weights[\"hidden\"]), bias[\"hidden\"])\n layer1 = tf.nn.relu(layer1)\n\n output_layer = tf.matmul(layer1, weights[\"output\"]) + bias[\"output\"]\n return output_layer\n\n\ntrain_X, train_Y = data_encode(\"iris.train\") # dataset de treinamento\ntest_X, test_Y = data_encode(\"iris.test\") # dataset de validacao\n\npred = model(X, weights, bias)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=Y))\noptimizador = tf.train.AdamOptimizer(learning_rate).minimize(cost)\n\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init)\n\n for epochs in range(training_epochs):\n _, c = sess.run([optimizador, cost], feed_dict={X: train_X, Y: train_Y})\n if (epochs + 1) % display_steps == 0:\n print(\"Epoch:\", epochs + 1, \"Cost:\", c)\n print(\"Optimization Finished\")\n\n test_result = sess.run(pred, feed_dict={X: train_X})\n correct_prediction = tf.equal(tf.argmax(test_result, 1), tf.argmax(train_Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n print(\"accuracy:\", accuracy.eval({X: test_X, Y: test_Y}))\n","sub_path":"MLP/MLP_Iris.py","file_name":"MLP_Iris.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"295044908","text":"from scipy import linalg as la\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cmath\nimport math\nfrom scipy.sparse import dok_matrix\nimport sympy as sy\nfrom matplotlib import pyplot as plt\nfrom numba import jit\nfrom sympy.abc import x\nfrom sympy.utilities.lambdify import lambdify, implemented_function\nfrom sympy import Function\nfrom numba import jit\nfrom autograd import grad\nimport autograd.numpy as anp\nfrom autograd import jacobian\nfrom autograd import elementwise_grad\nimport time\n\n# Problem 1\nprint(\"\\n\\n Problem 1 \\n\")\ndef f_prime(Functions, X):\n \n original_fn = []\n prime_fn = []\n for i in range(len(X)):\n x = sy.symbols('x')\n fx = lambda x: Functions\n lam_f_x = lambdify(x, fx(x))\n lam_f_prime_x = lambdify(x, sy.diff(fx(x)))\n original_fn.append(lam_f_x(X[i]))\n prime_fn.append(lam_f_prime_x(X[i]))\n return original_fn, np.array(prime_fn), lam_f_x, lam_f_prime_x, X\n\nX = np.linspace(-np.pi, np.pi, 200)\nx = sy.symbols('x')\nFunctions = (sy.sin(x) + 1)**sy.sin(sy.cos(x))\noriginal_fn, prime_fn, lam_f_x, lam_f_prime_x, X = f_prime(Functions, X)\n\nplt.plot(X, original_fn)\nplt.plot(X, prime_fn, color = \"red\")\nax = plt.gca()\nax.spines[\"bottom\"].set_position(\"zero\")\nplt.show()\n\n# Problem 2\nprint(\"\\n\\n Problem 2 \\n\")\ndef f_prime_different(Functions, X, h):\n original_fn = []\n prime_fn = []\n foward1 = []\n foward2 = []\n backward1 = []\n backward2 = []\n centered1 
= []\n centered2 = []\n \n for i in range(len(X)):\n x = sy.symbols('x')\n fx = lambda x: Functions\n lam_f_x = lambdify(x, fx(x))\n \n foward1.append( ( lam_f_x(X[i] + h) - lam_f_x(X[i]) ) / h )\n foward2.append( ( -3*lam_f_x(X[i]) + 4*lam_f_x(X[i] + h) - lam_f_x(X[i] + 2*h) ) / (2*h) )\n backward1.append( ( lam_f_x(X[i]) - lam_f_x(X[i] - h) ) / h )\n backward2.append( ( 3*lam_f_x(X[i]) - 4*lam_f_x(X[i] - h) + lam_f_x(X[i] - 2*h) ) / (2*h) )\n centered1.append( ( lam_f_x(X[i] + h) - lam_f_x(X[i] - h) ) / (2*h) )\n centered2.append( ( lam_f_x(X[i] - 2*h) - 8*lam_f_x(X[i] - h) + 8*lam_f_x(X[i] + h) - lam_f_x(X[i] + 2*h) ) / (12*h) )\n \n original_fn.append(lam_f_x(X[i]))\n \n return original_fn, np.array(foward1), np.array(foward2), np.array(backward1), np.array(backward2), np.array(centered1), np.array(centered2)\n\n\nFunctions = (sy.sin(x) + 1)**sy.sin(sy.cos(x))\nX = np.linspace(-np.pi, np.pi, 200)\noriginal_fn, foward1, foward2, backward1, backward2, centered1, centered2 = f_prime_different(Functions, X, h = 1e-10)\n\n\n\nplt.plot(X, original_fn, label = \"orginal function\")\nplt.plot(X, prime_fn, color = \"red\", label = \"prime fn from problem1\")\nplt.plot(X, foward1, label = \"foward1\") # pass\nplt.plot(X, foward2, label = \"foward2\")\nplt.plot(X, backward1, label = \"backward1\") # pass\nplt.plot(X, backward2, label = \"backward2\")\nplt.plot(X, centered1, label = \"centered1\")\nplt.plot(X, centered2, label = \"centered2\")\nplt.legend()\nax = plt.gca()\nax.spines[\"bottom\"].set_position(\"zero\")\nplt.show()\n\n# Problem 3\nprint(\"\\n\\n Problem 3\\n\")\n@jit\ndef problem3(X, h):\n Functions = (sy.sin(x) + 1)**sy.sin(sy.cos(x))\n original_fn, prime_fn, lam_f_x, lam_f_prime_x, X = f_prime(Functions, X)\n\n for1 = []\n for2 = []\n back1 = []\n back2 = []\n cent1 = []\n cent2 = []\n \n for i in range(len(h)):\n original_fn, foward1, foward2, backward1, backward2, centered1, centered2 = f_prime_different(Functions, X, h[i])\n for1.append(la.norm(prime_fn - foward1))\n for2.append(la.norm(prime_fn - foward2))\n back1.append(la.norm(prime_fn - backward1))\n back2.append(la.norm(prime_fn - backward2))\n cent1.append(la.norm(prime_fn - centered1))\n cent2.append(la.norm(prime_fn - centered2))\n return for1, for2, back1, back2, cent1, cent2\n\nX0 = [1]\nh = np.logspace(-8, 0, 9)\n\nfor1, for2, back1, back2, cent1, cent2 = problem3(X0, h)\n\nplt.loglog(h, for1, label = \"for1\")\nplt.loglog(h,for2, label = \"for2\")\nplt.loglog(h,back1, label = \"back1\")\nplt.loglog(h,back2, label = \"back2\")\nplt.loglog(h,cent1, label = \"cent1\")\nplt.loglog(h,cent2, label = \"cent2\")\nplt.legend()\nplt.show()\n\n# Problem 4\nprint(\"\\n\\n Problem 4 \\n\")\ndef radar():\n data = np.load(\"C:/Users/suket/Desktop/Homeworks/Computation/Week4/Problem3/plane.npy\")\n alpha = np.deg2rad(data[:, 1])\n beta = np.deg2rad(data[:, 2])\n\n X_Functions = lambda x, y : 500 * (np.tan(y)) / ( np.tan(y) - np.tan(x) )\n Y_Functions = lambda x, y : 500 * (np.tan(y) * np.tan(x)) / ( np.tan(y) - np.tan(x) )\n XY_Functions = lambda x_prime, y_prime : (x_prime ** 2 + y_prime ** 2) ** 0.5\n \n speed = []\n for t in range(7, 15, 1):\n if t == 7:\n x_prime = ((X_Functions(alpha[t-6], beta[t-6]) - X_Functions(alpha[t-7], beta[t-7])) / 1)\n y_prime = ((Y_Functions(alpha[t-6], beta[t-6]) - Y_Functions(alpha[t-7], beta[t-7])) / 1)\n speed.append( XY_Functions(x_prime, y_prime) )\n \n elif t > 7 and t < 14:\n x_prime = ((X_Functions(alpha[t-6], beta[t-6]) - X_Functions(alpha[t-8], beta[t-8])) / 2)\n y_prime = 
((Y_Functions(alpha[t-6], beta[t-6]) - Y_Functions(alpha[t-8], beta[t-8])) / 2)\n XY_Functions(x_prime, y_prime)\n speed.append( XY_Functions(x_prime, y_prime) )\n else:\n x_prime = ((X_Functions(alpha[t-7], beta[t-7]) - X_Functions(alpha[t-8], beta[t-8])) / 1)\n y_prime = ((Y_Functions(alpha[t-7], beta[t-7]) - Y_Functions(alpha[t-8], beta[t-8])) / 1)\n XY_Functions(x_prime, y_prime)\n speed.append( XY_Functions(x_prime, y_prime) )\n \n return speed\n\nprint(radar())\n\n# Problem 5\nprint(\"\\n\\n Problem 5 \\n\")\ndef Jacobian(x_5, fx_5, h = 1e-10):\n Jacob = np.zeros((np.size(x_5), np.size(x_5)))\n I = np.identity(np.size(x_5))\n for i in range(np.size(x_5)):\n for j in range(np.size(fx_5)):\n Jacob[j, i] = (fx_5[j](x_5 + h*I[:, i]) - fx_5[j](x_5 - h*I[:, i])) / (2*h) \n return Jacob\n\nfx_5_1 = lambda x: x[0]**2\nfx_5_2 = lambda x: x[0]**3 - x[1]\n\nx_5 = np.array([5, 10])\nfx_5 = np.array([fx_5_1, fx_5_2])\n\nprint(Jacobian(x_5, fx_5, 1e-10))\n\n# Problem 6\nprint(\"\\n\\n Problem 6 \\n\")\ndef sympy_method(X, fx):\n x = sy.symbols('x')\n lam_f_x = lambdify(x, fx(x))\n lam_f_prime_x = lambdify(x, sy.diff(fx(x)))\n return lam_f_prime_x(X)\n\ndef second_order_method(x, f, h = 1e-10):\n secone_order = (f(x + h) - f(x - h)) / (2*h)\n return secone_order\n\ndef autograd(x, f):\n grad_g = grad(f)\n return grad_g(x)\n\nf_6 = lambda x: sy.log(sy.sin(sy.sqrt(x)))\nx_6 = sy.pi/4\nstart_time = time.clock()\nprint(\"Answer =\", sympy_method(x_6, f_6))\nprint(time.clock() - start_time)\n\nf_6 = lambda x: np.log(np.sin(np.sqrt(x)))\nx_6 = np.pi/4\nstart_time = time.clock()\nprint(\"Answer =\", second_order_method(x_6, f_6))\nprint(time.clock() - start_time)\n\nstart_time = time.clock()\nf_6 = lambda x: anp.log(anp.sin(anp.sqrt(x)))\nx_6 = anp.pi/4\nprint(\"Answer =\", autograd(x_6, f_6))\nprint(time.clock() - start_time)\n\n# Problem 7\nprint(\"\\n\\n Problem 7 \\n\")\n# Define the Taylor series.\n# Note that this function does not account for array broadcasting.\ndef taylor_exp_for_sin(x, N = 10000, tol=.0001):\n result = 0\n cur_term = x\n i = 0\n while anp.abs(cur_term) >= tol:\n # Autograd's version of NumPy doesn't have the math attribute so use NumPy.\n cur_term = ( (-1)**i/np.math.factorial(2*i + 1) ) * x**(2*i + 1)\n result += cur_term\n i += 1\n return result\n\ndef calculate_derivative(x0, N):\n # Compute the gradient.\n if N == 1:\n d_taylor_exp = grad(taylor_exp_for_sin)\n elif N ==2:\n d_taylor_exp = grad(grad(taylor_exp_for_sin))\n else:\n return False\n # Note that differentiation in autograd only works with float values.\n deri_f = []\n for i in range(len(x0)):\n deri_f.append(d_taylor_exp(x0[i], N))\n return deri_f\n\nx0 = np.linspace(-np.pi, np.pi , 500)\n\nderi_f = calculate_derivative(x0, 1)\nplt.plot(x0, np.sin(x0))\nplt.plot(x0, deri_f)\nax = plt.gca()\nax.spines[\"bottom\"].set_position(\"zero\")\nplt.show()\n\nderi_f = calculate_derivative(x0, 2)\nplt.plot(x0, np.sin(x0))\nplt.plot(x0, deri_f)\nax = plt.gca()\nax.spines[\"bottom\"].set_position(\"zero\")\nplt.show()\n\n# Problem 8\nprint(\"\\n\\n Problem 8 \\n\")\n\ndef sympy_jacob(x0, y0, X, Y):\n x = sy.symbols('x')\n y = sy.symbols('y')\n lam_f_x = lambdify((x, y), Y.jacobian(X))\n return lam_f_x(x0, y0)\n\ndef auto_jacob(x, f):\n jacob_f = jacobian(f)\n return jacob_f(x_8)\n\n#from sympy.abc import x, y\nx = sy.symbols('x')\ny = sy.symbols('y')\nY = sy.Matrix([sy.exp(x) * sy.sin(y) + y**3, 3*y - sy.cos(x)])\nX = sy.Matrix([x, y])\nx_8_1 = 1\nx_8_2 = 1\nstart_time = time.clock()\nprint(\"Answer =\", sympy_jacob(x_8_1, 
x_8_2, X, Y))\nprint(time.clock() - start_time)\n\nf_8_1 = lambda x : np.exp(x[0]) * np.sin(x[1]) + x[1]**3\nf_8_2 = lambda x : 3*x[1] - np.cos(x[0])\nx_8 = np.array([1, 1])\nf_8 = np.array([f_8_1, f_8_2])\nstart_time = time.clock()\nprint(\"\\nAnswer =\", Jacobian(x_8, f_8))\nprint(time.clock() - start_time)\n\nf_8 = lambda x : anp.array([anp.exp(x[0]) * anp.sin(x[1]) + x[1]**3, 3*x[1] - anp.cos(x[0])])\nx_8 = anp.array([1.0, 1.0])\nprint(\"\\nAnswer =\", auto_jacob(x_8, f_8))\nprint(time.clock() - start_time)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Homeworks/Computation/Week4/Problem3/Problem_set_4_3.py","file_name":"Problem_set_4_3.py","file_ext":"py","file_size_in_byte":9288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"311355550","text":"from ..base import MLClassifierBase\nimport copy\nimport numpy as np\n\nclass BinaryRelevance(MLClassifierBase):\n \"\"\"Binary Relevance multi-label classifier.\"\"\"\n BRIEFNAME = \"BR\"\n \n def __init__(self, classifier = None):\n super(BinaryRelevance, self).__init__(classifier)\n\n def fit(self, X, y):\n \"\"\"Fit classifier according to X,y, see base method's documentation.\"\"\"\n self.classifiers = []\n self.label_count = len(y[0])\n\n for i in xrange(self.label_count):\n classifier = copy.deepcopy(self.classifier)\n y_subset = self.generate_data_subset(y,i)\n classifier.fit(X,y_subset)\n self.classifiers.append(classifier)\n\n return self\n\n def predict(self, X):\n \"\"\"Predict labels for X, see base method's documentation.\"\"\"\n result = np.zeros((len(X), self.label_count), dtype='i8')\n \n for label in xrange(self.label_count):\n prediction = self.classifiers[label].predict(X)\n\n for row in xrange(len(X)):\n result[row, label] = prediction[row]\n\n return result\n","sub_path":"skmultilearn/meta/br.py","file_name":"br.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331090784","text":"from django.conf.urls import patterns, url\nfrom wypozyczalnia_app import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^rejestracja/$', views.rejestracja, name='rejestracja'),\n url(r'^rejestruj/$', views.rejestruj, name='rejestruj'),\n url(r'^logowanie/$', views.logowanie, name='logowanie'),\n url(r'^zaloguj/$', views.zaloguj, name='zaloguj'),\n url(r'^aktywacja/$', views.aktywacja, name='aktywacja'),\n url(r'^aktywuj/$', views.aktywuj, name='aktywuj'),\n url(r'^wypozyczanie/$', views.wypozyczanie, name='wypozyczanie'),\n url(r'^wypozycz/$', views.wypozycz, name='wypozycz'),\n url(r'^zwroc_rower/$', views.zwroc_rower, name='zwroc_rower'),\n url(r'^zwroc/$', views.zwroc, name='zwroc'),\n)","sub_path":"wypozyczalnia_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"32705315","text":"# Copyright (C) 2011 Canonical\n#\n# Authors:\n# Matthew McGowan\n# Michael Vogt\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; version 3.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA\n\nfrom gi.repository import Gtk, Gdk, GObject, Pango\n\nfrom softwarecenter.utils import utf8\nfrom softwarecenter.ui.gtk3.em import EM\nfrom softwarecenter.ui.gtk3.models.appstore2 import CategoryRowReference\n\nfrom stars import StarRenderer, StarSize\n\n\nclass CellButtonIDs:\n INFO = 0\n ACTION = 1\n\n\n# custom cell renderer to support dynamic grow\nclass CellRendererAppView(Gtk.CellRendererText):\n\n # x, y offsets for the overlay icon\n OVERLAY_XO = OVERLAY_YO = 2\n\n # size of the install overlay icon\n OVERLAY_SIZE = 16\n\n # ratings\n MAX_STARS = 5\n STAR_SIZE = EM\n\n # initialize declared properties (LP: #965937)\n application = GObject.Property(\n type=GObject.TYPE_PYOBJECT,\n nick='document',\n blurb='a xapian document containing pkg information',\n flags=(GObject.PARAM_READWRITE | GObject.PARAM_CONSTRUCT),\n default=None)\n isactive = GObject.Property(\n type=bool,\n nick='isactive',\n blurb='is cell active/selected',\n flags=(GObject.PARAM_READWRITE | GObject.PARAM_CONSTRUCT),\n default=False)\n\n def __init__(self, icons, layout, show_ratings, overlay_icon_name):\n Gtk.CellRendererText.__init__(self)\n\n # the icon pixbuf to be displayed in the row\n self.icon = None\n\n # geometry-state values\n self.pixbuf_width = 0\n self.apptitle_width = 0\n self.apptitle_height = 0\n self.normal_height = 0\n self.selected_height = 0\n self.show_ratings = show_ratings\n\n # button packing\n self.button_spacing = 0\n self._buttons = {\n Gtk.PackType.START: [],\n Gtk.PackType.END: []\n }\n self._all_buttons = {}\n\n # cache a layout\n self._layout = layout\n # star painter, paints stars\n self._stars = StarRenderer()\n self._stars.size = StarSize.SMALL\n\n # icon/overlay jazz\n try:\n self._installed = icons.load_icon(overlay_icon_name,\n self.OVERLAY_SIZE, 0)\n except GObject.GError:\n # icon not present in theme, probably because running uninstalled\n self._installed = icons.load_icon('emblem-system',\n self.OVERLAY_SIZE, 0)\n\n def _layout_get_pixel_width(self, layout):\n return layout.get_size()[0] / Pango.SCALE\n\n def _layout_get_pixel_height(self, layout):\n return layout.get_size()[1] / Pango.SCALE\n\n def _render_category(self,\n context, cr, app, cell_area, layout, xpad, ypad, is_rtl):\n\n layout.set_markup('%s' % app.display_name, -1)\n\n # work out max allowable layout width\n lw = self._layout_get_pixel_width(layout)\n lh = self._layout_get_pixel_height(layout)\n\n if not is_rtl:\n x = cell_area.x\n else:\n x = cell_area.x + cell_area.width - lw\n y = cell_area.y + (cell_area.height - lh) / 2\n\n Gtk.render_layout(context, cr, x, y, layout)\n\n def _render_price(self, context, cr, app, layout, cell_area, xpad, ypad,\n is_rtl):\n layout.set_markup(\"US$ %s\" % self.model.get_price(app), -1)\n\n if is_rtl:\n x = cell_area.x + xpad\n else:\n x = (cell_area.x + cell_area.width - xpad -\n self._layout_get_pixel_width(layout))\n\n Gtk.render_layout(context, cr,\n x, ypad + cell_area.y, layout)\n\n def _render_icon(self, cr, app, cell_area, xpad, ypad, is_rtl):\n # calc offsets so icon is nicely centered\n self.icon = self.model.get_icon(app)\n self.icon_x_offset = xpad + cell_area.x\n self.icon_y_offset = ypad + cell_area.y\n xo = (self.pixbuf_width - self.icon.get_width()) / 2\n\n if not is_rtl:\n x = cell_area.x 
+ xo + xpad\n else:\n x = cell_area.x + cell_area.width + xo - self.pixbuf_width - xpad\n y = cell_area.y + ypad\n\n # draw appicon pixbuf\n Gdk.cairo_set_source_pixbuf(cr, self.icon, x, y)\n cr.paint()\n\n # draw overlay if application is installed\n if self.model.is_installed(app):\n if not is_rtl:\n x += (self.pixbuf_width - self.OVERLAY_SIZE + self.OVERLAY_XO)\n else:\n x -= self.OVERLAY_XO\n y += (self.pixbuf_width - self.OVERLAY_SIZE + self.OVERLAY_YO)\n Gdk.cairo_set_source_pixbuf(cr, self._installed, x, y)\n cr.paint()\n\n def _render_summary(self, context, cr, app,\n cell_area, layout, xpad, ypad,\n star_width, is_rtl):\n\n layout.set_markup(self.model.get_markup(app), -1)\n\n # work out max allowable layout width\n layout.set_width(-1)\n lw = self._layout_get_pixel_width(layout)\n max_layout_width = (cell_area.width - self.pixbuf_width -\n 3 * xpad - star_width)\n\n max_layout_width = cell_area.width - self.pixbuf_width - 3 * xpad\n\n stats = self.model.get_review_stats(app)\n if self.show_ratings and stats:\n max_layout_width -= star_width + 6 * xpad\n\n if (self.props.isactive and\n self.model.get_transaction_progress(app) > 0):\n action_btn = self.get_button_by_name(CellButtonIDs.ACTION)\n max_layout_width -= (xpad + action_btn.width)\n\n if lw >= max_layout_width:\n layout.set_width((max_layout_width) * Pango.SCALE)\n layout.set_ellipsize(Pango.EllipsizeMode.MIDDLE)\n lw = max_layout_width\n\n apptitle_extents = layout.get_line_readonly(0).get_pixel_extents()[1]\n self.apptitle_width = apptitle_extents.width\n self.apptitle_height = apptitle_extents.height\n\n if not is_rtl:\n x = cell_area.x + 2 * xpad + self.pixbuf_width\n else:\n x = (cell_area.x + cell_area.width - lw - self.pixbuf_width -\n 2 * xpad)\n\n y = cell_area.y + ypad\n\n Gtk.render_layout(context, cr, x, y, layout)\n\n def _render_rating(self, context, cr, app,\n cell_area, layout, xpad, ypad,\n star_width, star_height, is_rtl):\n\n stats = self.model.get_review_stats(app)\n if not stats:\n return\n\n sr = self._stars\n\n if not is_rtl:\n x = (cell_area.x + 3 * xpad + self.pixbuf_width +\n self.apptitle_width)\n else:\n x = (cell_area.x + cell_area.width\n - 3 * xpad\n - self.pixbuf_width\n - self.apptitle_width\n - star_width)\n\n y = cell_area.y + ypad + (self.apptitle_height - self.STAR_SIZE) / 2\n\n sr.rating = stats.ratings_average\n sr.render_star(context, cr, x, y)\n\n # and nr-reviews in parenthesis to the right of the title\n nreviews = stats.ratings_total\n s = \"(%i)\" % nreviews\n\n layout.set_markup(\"%s\" % s, -1)\n\n if not is_rtl:\n x += xpad + star_width\n else:\n x -= xpad + self._layout_get_pixel_width(layout)\n\n context.save()\n context.add_class(\"cellrenderer-avgrating-label\")\n Gtk.render_layout(context, cr, x, y, layout)\n context.restore()\n\n def _render_progress(self, context, cr, progress, cell_area, ypad, is_rtl):\n percent = progress * 0.01\n # per the spec, the progressbar should be the width of the action\n # button\n action_btn = self.get_button_by_name(CellButtonIDs.ACTION)\n\n x, _, w, h = action_btn.allocation\n # shift the bar to the top edge\n y = cell_area.y + ypad\n\n context.save()\n context.add_class(\"trough\")\n\n Gtk.render_background(context, cr, x, y, w, h)\n Gtk.render_frame(context, cr, x, y, w, h)\n\n context.restore()\n\n bar_size = w * percent\n\n context.save()\n context.add_class(\"progressbar\")\n\n if (bar_size > 0):\n if is_rtl:\n x += (w - bar_size)\n Gtk.render_activity(context, cr, x, y, bar_size, h)\n\n context.restore()\n\n def 
_render_buttons(self, context, cr, cell_area, layout, xpad, ypad,\n is_rtl):\n\n # layout buttons and paint\n y = cell_area.y + cell_area.height - ypad\n spacing = self.button_spacing\n\n if not is_rtl:\n start = Gtk.PackType.START\n end = Gtk.PackType.END\n xs = cell_area.x + 2 * xpad + self.pixbuf_width\n xb = cell_area.x + cell_area.width - xpad\n else:\n start = Gtk.PackType.END\n end = Gtk.PackType.START\n xs = cell_area.x + xpad\n xb = cell_area.x + cell_area.width - 2 * xpad - self.pixbuf_width\n\n for btn in self._buttons[start]:\n btn.set_position(xs, y - btn.height)\n btn.render(context, cr, layout)\n xs += btn.width + spacing\n\n for btn in self._buttons[end]:\n xb -= btn.width\n btn.set_position(xb, y - btn.height)\n btn.render(context, cr, layout)\n\n xb -= spacing\n\n def set_pixbuf_width(self, w):\n self.pixbuf_width = w\n\n def set_button_spacing(self, spacing):\n self.button_spacing = spacing\n\n def get_button_by_name(self, name):\n if name in self._all_buttons:\n return self._all_buttons[name]\n\n def get_buttons(self):\n btns = ()\n for k, v in self._buttons.items():\n btns += tuple(v)\n return btns\n\n def button_pack(self, btn, pack_type=Gtk.PackType.START):\n self._buttons[pack_type].append(btn)\n self._all_buttons[btn.name] = btn\n\n def button_pack_start(self, btn):\n self.button_pack(btn, Gtk.PackType.START)\n\n def button_pack_end(self, btn):\n self.button_pack(btn, Gtk.PackType.END)\n\n def do_set_property(self, pspec, value):\n setattr(self, pspec.name, value)\n\n def do_get_property(self, pspec):\n return getattr(self, pspec.name)\n\n def do_get_preferred_height_for_width(self, treeview, width):\n\n if not self.get_properties(\"isactive\")[0]:\n return self.normal_height, self.normal_height\n\n return self.selected_height, self.selected_height\n\n def do_render(self, cr, widget, bg_area, cell_area, flags):\n app = self.props.application\n if not app:\n return\n\n self.model = widget.appmodel\n\n context = widget.get_style_context()\n xpad = self.get_property('xpad')\n ypad = self.get_property('ypad')\n star_width, star_height = self._stars.get_visible_size(context)\n is_rtl = widget.get_direction() == Gtk.TextDirection.RTL\n layout = self._layout\n\n # important! ensures correct text rendering, esp. 
when using hicolor\n # theme\n #~ if (flags & Gtk.CellRendererState.SELECTED) != 0:\n #~ # this follows the behaviour that gtk+ uses for states in\n #~ # treeviews\n #~ if widget.has_focus():\n #~ state = Gtk.StateFlags.SELECTED\n #~ else:\n #~ state = Gtk.StateFlags.ACTIVE\n #~ else:\n #~ state = Gtk.StateFlags.NORMAL\n\n context.save()\n #~ context.set_state(state)\n\n if isinstance(app, CategoryRowReference):\n self._render_category(context, cr, app,\n cell_area,\n layout,\n xpad, ypad,\n is_rtl)\n return\n\n self._render_icon(cr, app,\n cell_area,\n xpad, ypad,\n is_rtl)\n\n self._render_summary(context, cr, app,\n cell_area,\n layout,\n xpad, ypad,\n star_width,\n is_rtl)\n\n # only show ratings if we have one\n if self.show_ratings:\n self._render_rating(context, cr, app,\n cell_area,\n layout,\n xpad, ypad,\n star_width,\n star_height,\n is_rtl)\n\n progress = self.model.get_transaction_progress(app)\n if progress > 0:\n self._render_progress(context, cr, progress,\n cell_area,\n ypad,\n is_rtl)\n\n elif self.model.is_purchasable(app):\n self._render_price(context, cr, app, layout,\n cell_area, xpad, ypad, is_rtl)\n\n # below is the stuff that is only done for the active cell\n if not self.props.isactive:\n return\n\n self._render_buttons(context, cr,\n cell_area,\n layout,\n xpad, ypad,\n is_rtl)\n\n context.restore()\n\n\nclass CellButtonRenderer(object):\n\n def __init__(self, widget, name, use_max_variant_width=True):\n # use_max_variant_width is currently ignored. assumed to be True\n\n self.name = name\n self.markup_variants = {}\n self.current_variant = None\n\n self.xpad = 12\n self.ypad = 4\n self.allocation = [0, 0, 1, 1]\n self.state = Gtk.StateFlags.NORMAL\n self.has_focus = False\n self.visible = True\n\n self.widget = widget\n\n def _layout_reset(self, layout):\n layout.set_width(-1)\n layout.set_ellipsize(Pango.EllipsizeMode.NONE)\n\n @property\n def x(self):\n return self.allocation[0]\n\n @property\n def y(self):\n return self.allocation[1]\n\n @property\n def width(self):\n return self.allocation[2]\n\n @property\n def height(self):\n return self.allocation[3]\n\n def configure_geometry(self, layout):\n self._layout_reset(layout)\n max_size = (0, 0)\n\n for k, variant in self.markup_variants.items():\n safe_markup = GObject.markup_escape_text(utf8(variant))\n layout.set_markup(safe_markup, -1)\n size = layout.get_size()\n max_size = max(max_size, size)\n\n w, h = max_size\n w /= Pango.SCALE\n h /= Pango.SCALE\n\n self.set_size(w + 2 * self.xpad, h + 2 * self.ypad)\n\n def point_in(self, px, py):\n x, y, w, h = self.allocation\n return (px >= x and px <= x + w and\n py >= y and py <= y + h)\n\n def get_size(self):\n return self.allocation[2:]\n\n def set_position(self, x, y):\n self.allocation[:2] = int(x), int(y)\n\n def set_size(self, w, h):\n self.allocation[2:] = int(w), int(h)\n\n def set_state(self, state):\n if not isinstance(state, Gtk.StateFlags):\n msg = (\"state should be of type Gtk.StateFlags, got %s\" %\n type(state))\n raise TypeError(msg)\n\n elif state == self.state:\n return\n\n self.state = state\n self.widget.queue_draw_area(*self.allocation)\n\n def set_sensitive(self, is_sensitive):\n if is_sensitive:\n state = Gtk.StateFlags.PRELIGHT\n else:\n state = Gtk.StateFlags.INSENSITIVE\n self.set_state(state)\n\n def show(self):\n self.visible = True\n\n def hide(self):\n self.visible = False\n\n def set_markup(self, markup):\n self.markup_variant = (markup,)\n\n def set_markup_variants(self, markup_variants):\n if not isinstance(markup_variants, 
dict):\n msg = type(markup_variants)\n raise TypeError(\"Expects a dict object, got %s\" % msg)\n\n elif not markup_variants:\n return\n\n self.markup_variants = markup_variants\n self.current_variant = markup_variants.keys()[0]\n\n def set_variant(self, current_var):\n self.current_variant = current_var\n\n def is_sensitive(self):\n return self.state is not Gtk.StateFlags.INSENSITIVE\n\n def render(self, context, cr, layout):\n if not self.visible:\n return\n\n x, y, width, height = self.allocation\n\n context.save()\n context.add_class(\"cellrenderer-button\")\n\n if self.has_focus:\n context.set_state(self.state | Gtk.StateFlags.FOCUSED)\n else:\n context.set_state(self.state)\n\n # render background and focal frame if has-focus\n context.save()\n context.add_class(Gtk.STYLE_CLASS_BUTTON)\n Gtk.render_background(context, cr, x, y, width, height)\n context.restore()\n\n if self.has_focus:\n Gtk.render_focus(context, cr,\n x + 3, y + 3,\n width - 6, height - 6)\n\n # position and render layout markup\n context.save()\n context.add_class(Gtk.STYLE_CLASS_BUTTON)\n layout.set_markup(self.markup_variants[self.current_variant], -1)\n layout_width = layout.get_pixel_extents()[1].width\n x = x + (width - layout_width) / 2\n y += self.ypad\n Gtk.render_layout(context, cr, x, y, layout)\n context.restore()\n\n context.restore()\n","sub_path":"mp4/SD_card/partition1/usr/share/software-center/softwarecenter/ui/gtk3/widgets/cellrenderers.py","file_name":"cellrenderers.py","file_ext":"py","file_size_in_byte":18067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"273461734","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# csvreader.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: eduriez +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2020/03/11 17:07:28 by eduriez #+# #+# #\n# Updated: 2020/03/11 20:13:33 by eduriez ### ########.fr #\n# #\n# **************************************************************************** #\n\nimport sys\n\nclass CsvReader:\n\tdef __init__(self, filename, sep=',', header=False, skip_top=0, skip_bottom=0):\n\t\ttry:\n\t\t\tfile_obj = open(filename)\n\t\t\tself.file_obj = file_obj\n\t\t\tif type(sep) == str:\n\t\t\t\tself.sep = sep\n\t\t\telse:\n\t\t\t\tprint(\"sep must be a string.\\n\", file=sys.stderr)\n\t\t\t\tself.file_obj.close()\n\t\t\t\tself.file_obj = None\n\t\t\tif type(header) == bool:\n\t\t\t\tself.header = header\n\t\t\telse:\n\t\t\t\tprint(\"header must be a bool.\\n\", file=sys.stderr)\n\t\t\t\tself.file_obj.close()\n\t\t\t\tself.file_obj = None\n\t\t\tif type(skip_top) != int or type(skip_bottom) != int or skip_top < 0 or skip_bottom < 0:\n\t\t\t\tprint(\"skip_top and skip_bottom must be both positive integers.\\n\", file=sys.stderr)\n\t\t\t\tself.file_obj.close()\n\t\t\t\tself.file_obj = None\n\t\t\tself.skip_top = skip_top\n\t\t\tself.skip_bottom = skip_bottom\n\t\t\tself.header_datas = []\n\t\t\tself.datas = []\n\t\texcept OSError as e:\n\t\t\tprint(f\"{filename} : {e.strerror}\")\n\t\t\tself.file_obj = None\n\n\tdef __enter__(self):\n\t\tif self.file_obj:\n\t\t\theader_datas = self.file_obj.readline().rstrip().split(self.sep)\n\t\t\tif self.header:\n\t\t\t\tself.header_datas = header_datas\n\t\t\tcurrent_line = self.file_obj.readline()\n\t\t\terror = False\n\t\t\theader_datas_len = len(header_datas)\n\t\t\tdatas = []\n\t\t\twhile not error and current_line:\n\t\t\t\tnew_row = current_line.rstrip().split(self.sep)\n\t\t\t\tlen_new_row = 
len(new_row)\n\t\t\t\tif len_new_row == header_datas_len and new_row[len_new_row - 1]:\n\t\t\t\t\tdatas.append(new_row)\n\t\t\t\t\tcurrent_line = self.file_obj.readline()\n\t\t\t\telse:\n\t\t\t\t\terror = True\n\t\t\tif error:\n\t\t\t\treturn None\n\t\t\tif self.skip_top > 0:\n\t\t\t\tdatas = datas[self.skip_top:]\n\t\t\tif self.skip_bottom > 0:\n\t\t\t\tdatas = datas[:-self.skip_bottom]\n\t\t\tself.datas = datas\n\t\t\treturn self\n\t\treturn None\n\n\tdef __exit__(self, exec_type, exec_value, exec_traceback):\n\t\tif self.file_obj:\n\t\t\tself.file_obj.close()\n\t\t# a falsy return lets exceptions raised inside the with block propagate instead of being silently swallowed\n\t\treturn False\n\t\n\tdef getdata(self):\n\t\treturn self.datas\n\n\tdef getheader(self):\n\t\treturn self.header_datas\n\nif __name__ == \"__main__\":\n\twith CsvReader('good.csv', sep=\",\", header=True, skip_top=1, skip_bottom=1) as csv_reader:\n\t\tif csv_reader != None:\n\t\t\tprint(f\"HEADER : {csv_reader.getheader()}\\n\")\n\t\t\tprint(f\"DATAS : {csv_reader.getdata()}\\n\")\n\t\telse:\n\t\t\tprint(\"ERROR\")\n","sub_path":"bootcamp_python_for_ML/D02/ex03/csvreader.py","file_name":"csvreader.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"487710274","text":"import time\nimport os\nimport unittest\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver import ActionChains\nfrom Automate_Tests_Content.Web.Selenuim.FullRegression.InfoButtonMenu.info_button_utility import InfoButtonUtility\nfrom Automate_Tests_Content.Web.Selenuim.logger import Logger\nfrom Automate_Tests_Content.Web.Selenuim.utils.full_regression_utility import FullRegressionUtility\n\n__author__ = \"Chulud Mallak\"\n__copyright__ = \"view_mode_preview\"\n__credits__ = [\"Chulud Mallak\"]\n__version__ = \"1.0.0\"\n__maintainer__ = \"Yaacov Pinhas\"\n__email__ = \"chuludx.mallak@intel.com\"\n__status__ = \"Production\"\n\n\nclass FileUnderConflict(unittest.TestCase):\n    logger = Logger()\n    __full_regression_utility = FullRegressionUtility()\n    __info_button_utility = InfoButtonUtility()\n    __path_of_chrome_driver = os.path.join(os.getcwd(), \"bin\", \"chromedriver.exe\")\n    __browser = webdriver.Chrome(executable_path=__path_of_chrome_driver)\n    __url_of_conflict_file = \"https://onesource-cons.intel.com/Predator/Home/Index/73506\"\n    __MyEditsFrame = \"https://onesource-cons.intel.com/Predator/Home/MyDashboard/Edits\"\n\n    def click_conflict_button(self, browser=__browser):\n        try:\n            table = browser.find_element_by_id(\"MyEditsTable\")\n            buttons_array = table.find_elements_by_tag_name(\"button\")\n            for i in range(len(buttons_array) - 1, 0, -1):\n                if buttons_array[i].get_attribute(\"data-mapnodeid\") == \"73506\":\n                    buttons_array[i].click()\n                    time.sleep(10)\n                    break\n        except Exception as e:\n            self.logger.Error(str(e))\n\n    def next_conflict(self, conflict_window):\n        try:\n            merge_content_div = conflict_window.find_element_by_class_name(\"MergeConflictDialog\")\n            selected_lines_div = merge_content_div.find_elements_by_class_name(\"SelectedLinesDiv\")\n            text = selected_lines_div[0].find_element_by_class_name(\"text-area-top-border\")\n            if \"ConfigVariableCollectionOneSourceNode\" in text:\n                self.logger.Info(\"Click on Next Conflict button succeeded...\")\n        except Exception as e:\n            self.logger.Error(str(e))\n\n    def write_in_merge_file(self, conflict_window, browser=__browser):\n        try:\n            action = ActionChains(browser)\n            div = 
conflict_window.find_element_by_css_selector(\"span.cm-string.CodeMirror-merge-r-inserted\")\n action.move_to_element(div)\n time.sleep(3)\n action.click(div)\n time.sleep(1)\n action.send_keys_to_element(browser, \"NfNN\", div)\n\n except Exception as e:\n self.logger.Error(str(e))\n\n def test_01_ConflictFilePreview(self, browser=__browser):\n try:\n text_message = \"This working copy has merge conflicts which need to be resolved\"\n self.logger.Info(\"Tests File Under Conflict:\")\n self.logger.Info(\"In ALM 5.5.01 File Under Conflict \\n \")\n self.logger.Info(\"...\\n\")\n self.logger.Info(\"Attempting navigation to: {}\\n\".format(self.__url_of_conflict_file))\n browser.get(self.__url_of_conflict_file)\n time.sleep(10)\n self.__full_regression_utility.cancel_edit_mode(browser)\n self.__full_regression_utility.switch_browser_to_frame(\"ContentOuterIFrame\", browser)\n self.__info_button_utility.Cont_Scroll_Off(browser)\n self.__full_regression_utility.click_on_info_button(browser)\n self.__full_regression_utility.click_option_menu_button(\"History\", browser) # open History Tab\n self.__full_regression_utility.click_on_info_button(browser)\n history_table = browser.find_element_by_id(\"RevisionHistory73506\")\n table = history_table.find_element_by_class_name(\"obj\")\n td = table.find_elements_by_tag_name(\"td\")\n for i in td:\n if i.text == \"Merge Conflict\":\n i.click()\n time.sleep(10)\n break\n message_box = browser.find_element_by_id(\"MessageBox\")\n if text_message in message_box.text:\n self.logger.Info(\"Working Copy in Merge Conflict popup window opened Successfully...\")\n message_box.find_element_by_id(\"MessageBoxOkButton\").click()\n time.sleep(5)\n try:\n browser.find_element_by_id(\"MessageBox\")\n except NoSuchElementException:\n self.logger.Info(\"Working Copy in Merge Conflict popup window closed successfully...\\n\")\n except Exception as e:\n self.logger.Error(str(e))\n\n def test_02_ActionsInMergeUI(self, browser=__browser):\n try:\n self.logger.Info(\"Tests Actions in Merge UI:\")\n self.logger.Info(\"In ALM 5.5.04 Actions in Merge UI \\n \")\n self.logger.Info(\"...\\n\")\n self.logger.Info(\"Attempting navigation to: {}\\n\".format(self.__MyEditsFrame))\n browser.get(self.__MyEditsFrame)\n time.sleep(15)\n self.click_conflict_button()\n conflict_window = browser.find_element_by_class_name(\"ui-dialog\")\n title = conflict_window.find_element_by_id(\"ui-id-2\").text\n if \"Resolve Merge Conflict\" in title:\n self.logger.Info(\"Resolve Merge Conflict popup window opened Successfully...\")\n dialog_buttons = conflict_window.find_element_by_id(\"ui-id-1\").find_elements_by_tag_name(\"button\")\n for btn in dialog_buttons:\n title = btn.get_attribute(\"title\")\n if \"Next Conflict\" in title:\n btn.click()\n time.sleep(5)\n self.next_conflict(conflict_window)\n if \"Undo\" in title:\n self.write_in_merge_file(conflict_window)\n btn.click()\n time.sleep(5)\n self.logger.Info(\"Click on Undo button succeeded...\")\n if \"Redo\" in title:\n btn.click()\n time.sleep(5)\n self.logger.Info(\"Click on Redo button succeeded...\")\n except Exception as e:\n self.logger.Error(str(e))\n\n @classmethod\n def tearDownClass(cls):\n cls.__browser.quit()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Automate_Tests_Content/Web/Selenuim/FullRegression/MergeUIDisplay/file_under_conflict.py","file_name":"file_under_conflict.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"62998446","text":"#\n# Tests for the Casadi Algebraic Solver class\n#\nimport casadi\nimport pybamm\nimport unittest\nimport numpy as np\n\n\nclass TestCasadiAlgebraicSolver(unittest.TestCase):\n def test_algebraic_solver_init(self):\n solver = pybamm.CasadiAlgebraicSolver(tol=1e-4)\n self.assertEqual(solver.tol, 1e-4)\n\n solver.tol = 1e-5\n self.assertEqual(solver.tol, 1e-5)\n\n def test_simple_root_find(self):\n # Simple system: a single algebraic equation\n var = pybamm.Variable(\"var\")\n model = pybamm.BaseModel()\n model.algebraic = {var: var + 2}\n model.initial_conditions = {var: 2}\n\n # create discretisation\n disc = pybamm.Discretisation()\n disc.process_model(model)\n\n # Solve\n solver = pybamm.CasadiAlgebraicSolver()\n solution = solver.solve(model, np.linspace(0, 1, 10))\n np.testing.assert_array_equal(solution.y, -2)\n\n def test_root_find_fail(self):\n class Model:\n y0 = np.array([2])\n t = casadi.MX.sym(\"t\")\n y = casadi.MX.sym(\"y\")\n p = casadi.MX.sym(\"p\")\n casadi_algebraic = casadi.Function(\"alg\", [t, y, p], [y ** 2 + 1])\n\n def algebraic_eval(self, t, y, inputs):\n # algebraic equation has no real root\n return y ** 2 + 1\n\n model = Model()\n\n solver = pybamm.CasadiAlgebraicSolver()\n with self.assertRaisesRegex(\n pybamm.SolverError, \"Could not find acceptable solution: .../casadi\",\n ):\n solver._integrate(model, np.array([0]), {})\n solver = pybamm.CasadiAlgebraicSolver(error_on_fail=False)\n with self.assertRaisesRegex(\n pybamm.SolverError, \"Could not find acceptable solution: solver terminated\",\n ):\n solver._integrate(model, np.array([0]), {})\n\n def test_model_solver_with_time(self):\n # Create model\n model = pybamm.BaseModel()\n var1 = pybamm.Variable(\"var1\")\n var2 = pybamm.Variable(\"var2\")\n model.algebraic = {var1: var1 - 3 * pybamm.t, var2: 2 * var1 - var2}\n model.initial_conditions = {var1: pybamm.Scalar(1), var2: pybamm.Scalar(4)}\n model.variables = {\"var1\": var1, \"var2\": var2}\n\n disc = pybamm.Discretisation()\n disc.process_model(model)\n\n # Solve\n t_eval = np.linspace(0, 1)\n solver = pybamm.CasadiAlgebraicSolver()\n solution = solver.solve(model, t_eval)\n\n sol = np.vstack((3 * t_eval, 6 * t_eval))\n np.testing.assert_array_almost_equal(solution.y, sol)\n np.testing.assert_array_almost_equal(\n model.variables[\"var1\"].evaluate(t=t_eval, y=solution.y).flatten(),\n sol[0, :],\n )\n np.testing.assert_array_almost_equal(\n model.variables[\"var2\"].evaluate(t=t_eval, y=solution.y).flatten(),\n sol[1, :],\n )\n\n def test_solve_with_input(self):\n # Simple system: a single algebraic equation\n var = pybamm.Variable(\"var\")\n model = pybamm.BaseModel()\n model.algebraic = {var: var + pybamm.InputParameter(\"value\")}\n model.initial_conditions = {var: 2}\n\n # create discretisation\n disc = pybamm.Discretisation()\n disc.process_model(model)\n\n # Solve\n solver = pybamm.CasadiAlgebraicSolver()\n solution = solver.solve(model, np.linspace(0, 1, 10), inputs={\"value\": 7})\n np.testing.assert_array_equal(solution.y, -7)\n\n\nif __name__ == \"__main__\":\n print(\"Add -v for more debug output\")\n import sys\n\n if \"-v\" in sys.argv:\n debug = True\n pybamm.settings.debug_mode = True\n unittest.main()\n","sub_path":"tests/unit/test_solvers/test_casadi_algebraic_solver.py","file_name":"test_casadi_algebraic_solver.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"555018472","text":"from django.shortcuts import 
redirect\nfrom django.views.generic.detail import DetailView\nfrom django.urls import reverse_lazy, reverse\nfrom django.views.generic import RedirectView\n\nfrom witty_trips_project.trips.models import Trip\nfrom witty_trips_project.trips_temporary.models import TMPTrip\nfrom witty_trips_project.creator_puzzles.models import PuzzleSet\nfrom witty_trips_project.carts.models import CartItem, Cart\n\n\nclass TripSummaryPuzzlesMixin(object):\n puzzles_reused_list = None\n queryset = Trip.objects.select_related('puzzles', 'places')\n query_pk_and_slug = True\n tmp_trip_obj = None\n places_number = None\n requested_trip = None\n\n template_name = 'trips/summary.html'\n\n def get(self, request, *args, **kwargs):\n \"\"\"Get freshest puzzle-set and used puzzles list for user based on pk/slug trip. Get or create trip with\n freshest puzzle-set and set as working object.\"\"\"\n new_puzzles_obj, self.puzzles_reused_list = PuzzleSet.objects.user_fresh_puzzle_set(request=request,\n places_number=self.places_number,\n current_puzzle_set=self.requested_trip.puzzles,\n editing_cart_item=self.tmp_trip_obj.editing)\n\n self.tmp_trip_obj.puzzles = new_puzzles_obj\n self.tmp_trip_obj.places = self.requested_trip.places\n self.tmp_trip_obj.save()\n\n self.object, new_trip_obj = Trip.objects.new_or_get(place_set=self.requested_trip.places, puzzle_set=new_puzzles_obj)\n\n context = self.get_context_data(object=self.object)\n return super(TripSummaryPuzzlesMixin, self).render_to_response(context)\n\n def get_context_data(self, **kwargs):\n \"\"\"Check if trip is shared - if not, add browser trip context; add used puzzles; add place IDs; add if user\n has shared; add selected puzzles type/dif info.\"\"\"\n context = super(TripSummaryPuzzlesMixin, self).get_context_data(**kwargs)\n\n context['shareable'] = True\n trip_shared, shared_trip_obj = Trip.objects.trip_shared(trip_obj=self.object)\n context['browser_trip'] = None\n if trip_shared:\n context['browser_trip'] = shared_trip_obj\n context['shareable'] = False\n\n context['places'] = self.object.get_dict_of_place_ids()\n context['puzzles_reused'] = self.puzzles_reused_list\n context['editing_obj'] = self.tmp_trip_obj.editing\n context['places_number'] = self.places_number\n context['number_puzzles'] = self.object.number_of_puzzles()\n\n return context\n\n\nclass TripSummaryDetailView(TripSummaryPuzzlesMixin, DetailView):\n\n def dispatch(self, request, *args, **kwargs):\n self.requested_trip = self.get_object()\n self.places_number = self.requested_trip.number_of_stops()\n if self.places_number < 2:\n return redirect('creator-places:create')\n\n self.tmp_trip_obj, new_tmp_obj = TMPTrip.objects.new_or_get(request) # Load session temporary trip\n\n return super(TripSummaryDetailView, self).dispatch(request, *args, **kwargs)\n\n\nclass TripSummaryMobileDetailView(TripSummaryPuzzlesMixin, DetailView):\n template_name = 'trips/summary-mobile.html'\n\n def dispatch(self, request, *args, **kwargs):\n self.requested_trip = self.get_object()\n self.places_number = self.requested_trip.number_of_stops()\n if self.places_number < 2:\n return redirect('creator-places:create')\n\n self.tmp_trip_obj, new_tmp_obj = TMPTrip.objects.new_or_get(request) # Load session temporary trip\n\n return super(TripSummaryMobileDetailView, self).dispatch(request, *args, **kwargs)\n\n\nclass TripSummaryEditDetailView(TripSummaryPuzzlesMixin, DetailView):\n\n def dispatch(self, request, *args, **kwargs):\n self.requested_trip = self.get_object()\n\n self.places_number = 
self.requested_trip.number_of_stops()\n if self.places_number < 2:\n return redirect('creator-places:create')\n\n self.tmp_trip_obj, new_tmp_obj = TMPTrip.objects.new_or_get(request) # Load session temporary trip\n\n cart_obj, new_obj = Cart.objects.new_or_get(request)\n cart_item_obj = CartItem.objects.filter(cart=cart_obj, trip=self.requested_trip).first()\n\n self.tmp_trip_obj.editing = cart_item_obj\n\n if not self.tmp_trip_obj.editing:\n return redirect(reverse('trips:summary', kwargs={'pk': self.requested_trip.pk,\n 'slug': self.requested_trip.slug}))\n\n return super(TripSummaryEditDetailView, self).dispatch(request, *args, **kwargs)\n","sub_path":"witty_trips_project/trips/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"3088811","text":"import matplotlib.pyplot as plt\nimport lsst.sims.maf.metricBundles as metricBundles\nimport lsst.sims.maf.slicers as slicers\nimport lsst.sims.maf.db as db\nimport argparse\nimport time\nimport yaml\nfrom importlib import import_module\n# import sqlite3\nimport numpy as np\nfrom sn_tools.sn_cadence_tools import ReferenceData\nimport healpy as hp\nimport numpy.lib.recfunctions as rf\nimport sn_plotters.sn_snrPlotters as sn_plot\n\nparser = argparse.ArgumentParser(\n description='Run a SN metric from a configuration file')\nparser.add_argument('config_filename',\n help='Configuration file in YAML format.')\n\n\ndef run(config_filename):\n # YAML input file.\n config = yaml.load(open(config_filename), Loader=yaml.FullLoader)\n # print(config)\n outDir = 'Test' # this is for MAF\n\n # grab the db filename from yaml input file\n dbFile = config['Observations']['filename']\n\n \"\"\"\n conn = sqlite3.connect(dbFile)\n cur = conn.cursor()\n table_name='Proposal'\n result = cur.execute(\"PRAGMA table_info('%s')\" % table_name).fetchall()\n print('Results',result)\n\n cur.execute(\"SELECT * FROM Proposal\")\n rows = cur.fetchall()\n for row in rows:\n print(row)\n print('end')\n cur.execute('PRAGMA TABLE_INFO({})'.format('ObsHistory'))\n\n names = [tup[1] for tup in cur.fetchall()]\n print(names)\n \"\"\"\n opsimdb = db.OpsimDatabase(dbFile)\n # version = opsimdb.opsimVersion\n propinfo, proptags = opsimdb.fetchPropInfo()\n print('proptags and propinfo', proptags, propinfo)\n\n # grab the fieldtype (DD or WFD) from yaml input file\n fieldtype = config['Observations']['fieldtype']\n fake_file = config['Fake_file']\n module = import_module(config['Metric'])\n\n slicer = slicers.HealpixSlicer(nside=config['Pixelisation']['nside'])\n\n sqlconstraint = opsimdb.createSQLWhere(fieldtype, proptags)\n\n bundles = []\n names = []\n lim_sn = {}\n bands = config['Observations']['bands']\n z = config['Observations']['z']\n metric = {}\n # processing. 
Band after band\n\n Ra_ref = 0.000\n Dec_ref = -2.308039\n time_ref = time.time()\n for band in bands:\n sql_i = sqlconstraint+' AND '\n sql_i += 'filter = \"%s\"' % (band)\n # sql_i += ' AND abs(fieldRA-(%f))< %f' % (Ra_ref, 1.e-2)+' AND '\n # sql_i += 'abs(fieldDec-(%f))< %f' % (Dec_ref, 1.e-2)\n\n lim_sn[band] = ReferenceData(\n config['Li file'], config['Mag_to_flux file'], band, z)\n\n metric[band] = module.SNSNRMetric(lim_sn=lim_sn[band], names_ref=config['names_ref'], fake_file=fake_file, coadd=config['Observations']\n ['coadd'], z=z, display=config['Display_Processing'], season=config['Observations']['season'])\n bundles.append(metricBundles.MetricBundle(metric[band], slicer, sql_i))\n names.append(band)\n\n bdict = dict(zip(names, bundles))\n\n resultsDb = db.ResultsDb(outDir='None')\n mbg = metricBundles.MetricBundleGroup(bdict, opsimdb,\n outDir=outDir, resultsDb=resultsDb)\n\n mbg.runAll()\n\n # Let us display the results\n\n for band, val in bdict.items():\n metValues = val.metricValues[~val.metricValues.mask]\n res = None\n for vals in metValues:\n if res is None:\n res = vals\n else:\n res = np.concatenate((res, vals))\n res = np.unique(res)\n\n \"\"\"\n sn_plot.detecFracPlot(res, config['Pixelisation']\n ['nside'], config['names_ref'])\n\n sn_plot.detecFracHist(res, config['names_ref'])\n \"\"\"\n plt.show()\n\n\ndef main(args):\n print('running')\n time_ref = time.time()\n run(args.config_filename)\n print('Time', time.time()-time_ref)\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n main(args)\n","sub_path":"run_scripts/metrics/run_snr_metric.py","file_name":"run_snr_metric.py","file_ext":"py","file_size_in_byte":3851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"119157069","text":"inFile = open(\"in.txt\", 'r')\n\nnumStudents, numAssignments = None, None\nstudentAvgs = {}\n\nfor i, line in enumerate(inFile):\n\tif i == 0:\n\t\tnumStudents, numAssignments = map(int, line.split(' '))\n\t\tcontinue\n\tline = line.split(' ')\n\tstudentAvgs[line[0]] = sum(map(int, line[1:]))/float(numAssignments)\n\t\ninFile.close()\n\nclassAvgGrade = 0.0\nfor grade in studentAvgs.values():\n\tclassAvgGrade += grade\nclassAvgGrade = classAvgGrade/numStudents\n\noutFile = open(\"out.txt\", 'w')\noutFile.write('{:.2f}\\n'.format(classAvgGrade))\nfor student, avgGrade in sorted(studentAvgs.iteritems()):\n\toutFile.write('{} {:.2f}\\n'.format(student, avgGrade))","sub_path":"studentManagement.py","file_name":"studentManagement.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"470802753","text":"import numpy as np\n\n\ndef dihedral(p0, p1, p2, p3):\n \"\"\"formula from Wikipedia article on \"Dihedral angle\"\"\"\n p0 = np.array(p0)\n p1 = np.array(p1)\n p2 = np.array(p2)\n p3 = np.array(p3)\n\n b0 = -1.0 * (p1 - p0)\n b1 = p2 - p1\n b2 = p3 - p2\n\n b0xb1 = np.cross(b0, b1)\n b1xb2 = np.cross(b2, b1)\n\n b0xb1_x_b1xb2 = np.cross(b0xb1, b1xb2)\n\n y = np.dot(b0xb1_x_b1xb2, b1) * (1.0 / np.linalg.norm(b1))\n x = np.dot(b0xb1, b1xb2)\n\n return np.degrees(np.arctan2(y, x))\n","sub_path":"2018/XYZdihedral.py","file_name":"XYZdihedral.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"142289000","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# 
See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport logging\nfrom twisted.enterprise import adbapi\nfrom scrapy.http import Request\nimport MySQLdb\nimport MySQLdb.cursors\n\nclass DoubanPipeline(object):\n    def __init__(self):\n        self.dbpool=adbapi.ConnectionPool(\n            'MySQLdb',\n            db='douban',\n            user='root',\n            passwd='shi',\n            charset='utf8',\n            cursorclass = MySQLdb.cursors.DictCursor,\n            use_unicode= True\n        )\n\n    def process_item(self, item, spider):\n        if item.get('rank'):\n            query=self.dbpool.runInteraction(self.conditional_insert, item)\n            query.addErrback(self.handle_error)\n            return item\n        else:\n            query=self.dbpool.runInteraction(self.conditional_insert_review, item)\n            query.addErrback(self.handle_error)\n            return item\n\n    def handle_error(self,e):\n        logging.error(e)\n\n    def conditional_insert(self,tx,item):\n        #the second parameter in execute must be iterable!\n        tx.execute(\"select * from top250 where rank = %s\", [item['rank']])\n        result=tx.fetchone()\n        if result:\n            #logging.debug(result)\n            logging.debug(\"Item already stored in db:%s\" % item)\n        else:\n            tx.execute(\"insert into top250 (name,year,score,director,script,classification,actor,story,rank) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)\",\n                       (item['name'], item['year'], item['score'], item['director'],\n                        item['script'], item['classification'], item['actor'], item['story'], item['rank']))\n            logging.debug(\"Item stored in db: %s\" % item)\n\n    def conditional_insert_review(self,tx,item):\n        #the mechanism for preventing duplicate inserts could still be improved...\n        tx.execute(\"select * from reviews where title = %s and user = %s\", [item['title'],item['user']])\n        result=tx.fetchone()\n        if result:\n            #logging.debug(result)\n            logging.debug(\"Item already stored in db:%s\" % item)\n        else:\n            tx.execute(\"insert into reviews (title,user,score,time,content) values (%s,%s,%s,%s,%s)\",\n                       (item['title'], item['user'], item['score'], item['time'], item['content']))\n            logging.debug(\"Item stored in db: %s\" % item)\n","sub_path":"douban/douban/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"247756479","text":"import matplotlib.pyplot as plt\r\n\r\nimport spiral.agents.default as default_agent\r\nimport spiral.agents.utils as agent_utils\r\nimport spiral.environments.libmypaint as libmypaint\r\n\r\nimport numpy as np\r\n\r\n# The path to a TF-Hub module.\r\nMODULE_PATH = \"https://tfhub.dev/deepmind/spiral/default-wgangp-celebahq64-gen-19steps/agent4/1\"\r\n# The folder containing `libmypaint` brushes.\r\nBRUSHES_PATH = \"/mnt/c/Users/rmqli/spiral/third_party/mypaint-brushes-1.3.0/\"\r\n\r\n# Here, we create an environment.\r\nenv = libmypaint.LibMyPaint(episode_length=20,\r\n                            canvas_width=64,\r\n                            grid_width=32,\r\n                            brush_type=\"classic/pen\",\r\n                            brush_sizes=[1, 2, 4, 8, 12, 24],\r\n                            use_color=True,\r\n                            use_pressure=True,\r\n                            use_alpha=False,\r\n                            background=\"white\",\r\n                            brushes_basedir=BRUSHES_PATH)\r\n\r\n\r\n# Now we load the agent from a snapshot.\r\ninitial_state, step = agent_utils.get_module_wrappers(MODULE_PATH)\r\n\r\n# Everything is ready for sampling.\r\nstate = initial_state()\r\nnoise_sample = np.random.normal(size=(10,)).astype(np.float32)\r\n\r\ntime_step = env.reset()\r\nfor t in range(10):\r\n    # time_step = env.reset()\r\n    time_step.observation[\"noise_sample\"] = noise_sample\r\n    action, state = step(time_step.step_type, time_step.observation, state)\r\n    print(action)\r\n    time_step = env.step(action)\r\n    canvas = 
time_step.observation[\"canvas\"]\r\n print('canvas', canvas.shape, np.min(canvas), np.max(canvas))\r\n# plt.imshow(time_step.observation[\"canvas\"], interpolation=\"nearest\")\r\n# plt.show()\r\n# Show the sample.\r\n# plt.close(\"all\")\r\n# plt.imshow(time_step.observation[\"canvas\"], interpolation=\"nearest\")","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"305004695","text":"from phue import Bridge\nimport random\n\nipAddr=\"192.168.10.109\" #Enter your ip address\nbridge = Bridge(ipAddr)\nbridge.connect()\nlights = bridge.get_light_objects()\n\ndef onoffControl(lightID, signal):\n\tglobal lights\n\tcount = 0\n\tfor light in lights:\n\t\tcount += 1\n\t\tif count == lightID:\n\t\t\tprint(\"lamp ID :\",lightID,\", lamp name : \"+ light.name)\n\t\t\tif signal == 0:\n\t\t\t\tlight.on = False\n\t\t\t\tprint(\"Lamp off\")\n\t\t\telse:\n\t\t\t\tlight.on = True\n\t\t\t\tprint(\"Lamp on\")\n\ndef brightnessControl(lightID, bright):\n\tglobal lights\n\tcount = 0\n\tfor light in lights:\n\t\tcount += 1\n\t\tif count == lightID:\n\t\t\tprint(\"lamp ID :\",lightID,\", lamp name : \"+ light.name)\n\t\t\tif bright <= 0:\n\t\t\t\tlight.on = False\n\t\t\t\tprint(\"Lamp off\")\n\t\t\telif bright <= 255:\n\t\t\t\tlight.brightness = bright\n\t\t\t\tprint(\"Lamp brightness change to \",bright)\n\ndef colorControl(lightID, Xcolor=0.5, Ycolor=0.5):\n\tglobal lights\n\tcount = 0\n\tfor light in lights:\n\t\tcount += 1\n\t\tif count == lightID:\n\t\t\tprint(\"lamp ID :\",lightID,\", lamp name : \"+ light.name)\n\t\t\tif Xcolor <= 1 and Ycolor <= 1:\n\t\t\t\tlight.xy = [Xcolor, Ycolor]\n\t\t\t\tprint(\"Lamp color change to [\",Xcolor,\",\",Ycolor,\"]\")\n\nif __name__ == \"__main__\":\n\tonoffControl(1,1)\n\tbrightnessControl(1, 12)\n\tcolorControl(1, 0.4, 0.65)\n","sub_path":"hw09/hueControl.py","file_name":"hueControl.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"426465612","text":"\"\"\"\nGoogle Cloud Messaging\nPreviously known as C2DM\nDocumentation is available on the Android Developer website:\nhttps://developer.android.com/google/gcm/index.html\n\"\"\"\n\nimport json\nimport requests\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom . 
import NotificationError\nfrom .settings import PUSH_NOTIFICATIONS_SETTINGS as SETTINGS\n\nfrom push_notifications.models import GCMDevice\n\n\nclass GCMError(NotificationError):\n pass\n\n\ndef _chunks(l, n):\n \"\"\"\n Yield successive chunks from list l with a maximum size n\n \"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n\ndef _gcm_send(data, content_type='application/json', gcm_key=None):\n if gcm_key is None:\n key = SETTINGS.get(\"GCM_API_KEY\")\n else:\n key = gcm_key\n if not key:\n raise ImproperlyConfigured(\n 'You need to set PUSH_NOTIFICATIONS_SETTINGS[\"GCM_API_KEY\"] to send messages through GCM.')\n\n headers = {\n \"User-Agent\": \"GCM-Server\",\n \"Content-Type\": content_type,\n \"Authorization\": \"key=%s\" % key,\n }\n\n registration_ids = data.get('registration_ids')\n data = json.dumps(data)\n\n response = requests.post(url=SETTINGS[\"GCM_POST_URL\"],\n data=data,\n headers=headers)\n\n return process_response_for_errors(registration_ids, response.json())\n\n\ndef process_response_for_errors(recipient_list, response):\n results = response.get('results')\n\n for r in zip(results, recipient_list):\n if r[0].get('error') == 'NotRegistered':\n device = GCMDevice.objects.filter(registration_id=r[1])\n for d in device:\n d.active = False\n d.save()\n if r[0].get('message_id'):\n if r[0].get('registration_id'):\n device = GCMDevice.objects.filter(registration_id=r[1])\n for d in device:\n d.registration_id = r[0].get('registration_id')\n d.save()\n\n return response\n\n\ndef gcm_send_message(registration_id, data, collapse_key=None, time_to_live=None, delay_while_idle=False, gcm_key=None):\n \"\"\"\n Sends a GCM notification to a single registration_id.\n This will send the notification as json data.\n If sending multiple notifications, it is more efficient to use\n gcm_send_bulk_message()\n \"\"\"\n\n values = {\n \"registration_ids\": [registration_id,],\n \"data\": data\n }\n\n if collapse_key:\n values[\"collapse_key\"] = collapse_key\n\n if delay_while_idle:\n values[\"delay_while_idle\"] = delay_while_idle\n\n if time_to_live:\n values[\"time_to_live\"] = time_to_live\n\n return _gcm_send(values, gcm_key=gcm_key)\n\n\ndef gcm_send_bulk_message(registration_ids, data, collapse_key=None, time_to_live=None, delay_while_idle=False, gcm_key=None):\n \"\"\"\n Sends a GCM notification to one or more registration_ids. 
The registration_ids\n needs to be a list.\n This will send the notification as json data.\n \"\"\"\n\n # GCM only allows up to 1000 reg ids per bulk message\n # https://developer.android.com/google/gcm/gcm.html#request\n max_recipients = SETTINGS.get(\"GCM_MAX_RECIPIENTS\")\n if len(registration_ids) > max_recipients:\n ret = []\n for chunk in _chunks(registration_ids, max_recipients):\n ret.append(gcm_send_bulk_message(chunk, data, collapse_key, time_to_live, delay_while_idle, gcm_key=gcm_key))\n return ret\n\n values = {\n \"registration_ids\": registration_ids,\n \"data\": data,\n }\n\n if collapse_key:\n values[\"collapse_key\"] = collapse_key\n\n if delay_while_idle:\n values[\"delay_while_idle\"] = delay_while_idle\n\n if time_to_live:\n values[\"time_to_live\"] = time_to_live\n\n return _gcm_send(values, gcm_key=gcm_key)\n","sub_path":"push_notifications/gcm.py","file_name":"gcm.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +
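The `_chunks` helper and the recursive call in `gcm_send_bulk_message` above implement a simple fan-out: when more registration ids are passed than GCM accepts in one request, the list is sliced and each slice is sent separately. Below is a minimal, self-contained sketch of that pattern; `send_one_batch` is a hypothetical stand-in for the real HTTP call, not part of the library.

```python
def chunks(lst, n):
    # Yield successive slices of lst, each at most n items long.
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

def send_one_batch(ids, payload):
    # Hypothetical transport; a real sender would POST ids + payload here.
    return {"sent": len(ids)}

def send_bulk(ids, payload, max_recipients=1000):
    # Fan out in slices so no single request exceeds the per-call limit.
    return [send_one_batch(batch, payload) for batch in chunks(ids, max_recipients)]

if __name__ == "__main__":
    results = send_bulk(["reg-%d" % i for i in range(2500)], {"msg": "hi"})
    print(results)  # three batches: 1000 + 1000 + 500 recipients
```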
{"seq_id":"196402233","text":"from django.views.generic import TemplateView\nfrom .models import Category, Article, ArticleTag, Comment, Tag\nfrom django.db.models import Count, Sum\n\n\nclass BaseUserView(TemplateView):\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n most_popular_categories = Category.objects.annotate(articles_count=Count('article')).order_by(\n '-articles_count')[:3]\n most_commented_articles = Article.objects.annotate(comment_count=Count('comment')).order_by('-comment_count')[\n :10]\n most_populated_tags = Tag.objects.annotate(num_articles=Count('article__title')).order_by('-num_articles')[:10]\n\n context.update({\n 'most_commented_articles': most_commented_articles,\n 'most_popular_categories': most_popular_categories,\n 'most_populated_tags': most_populated_tags,\n })\n\n return context\n\n\nclass IndexView(TemplateView):\n template_name = 'blog/index.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n categories = Category.objects.all()\n\n most_popular_categories = Category.objects.annotate(articles_count=Count('article')).order_by('-articles_count')[:3]\n most_commented_articles = Article.objects.annotate(comment_count=Count('comment')).order_by('-comment_count')[:10]\n most_populated_tags = Tag.objects.annotate(num_articles=Count('article__title')).order_by('-num_articles')[:10]\n\n context.update({\n 'categories': categories,\n 'most_commented_articles': most_commented_articles,\n 'most_popular_categories': most_popular_categories,\n 'most_populated_tags': most_populated_tags,\n })\n return self.render_to_response(context)\n\n\nclass CategoryView(BaseUserView):\n template_name = 'blog/category.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n category = Category.objects.get(id=kwargs.get('category_id'))\n articles = Article.objects.filter(category_id=category.id)\n\n context.update({\n 'category': category,\n 'articles': articles\n })\n\n return self.render_to_response(context)\n\n\nclass ArticleView(BaseUserView):\n template_name = 'blog/article.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n article = Article.objects.get(id=kwargs.get('article_id'))\n\n context.update({\n 'article': article\n })\n\n return self.render_to_response(context)\n\n\nclass ArticleListView(BaseUserView):\n template_name = 'blog/articles.tpl'\n\n def get_context_data(self, **kwargs):\n 
context = super().get_context_data(**kwargs)\n\n # articles = Article.objects.all()[:3]\n articles = Article.objects.all()\n context.update({\n 'articles': articles\n })\n\n return context\n\n\nclass TagListView(BaseUserView):\n template_name = 'blog/tags.tpl'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n tags = Tag.objects.all()\n context.update({\n 'tags': tags\n })\n\n return context\n\n # def get(self, request, *args, **kwargs):\n # context = super().get_context_data(**kwargs)\n #\n # tags = Tag.objects.all()\n # context.update({\n # 'tags': tags\n # })\n #\n # return self.render_to_response(context)\n\n\nclass CategoryListView(TemplateView):\n pass\n\n\nclass ArticleTagView(BaseUserView):\n template_name = 'blog/tag_article.tpl'\n\n def get(self, request, *args, **kwargs):\n context = super().get_context_data(**kwargs)\n\n tag = Tag.objects.get(id=kwargs.get('tag_id'))\n articles = Article.objects.filter(tags=tag.id)\n\n context.update({\n 'tag': tag,\n 'articles': articles\n })\n\n return self.render_to_response(context)\n","sub_path":"blog/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"573754442","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom datetime import datetime, timedelta\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nclass pos_voucher(models.Model):\n _name = \"pos.voucher\"\n _rec_name = 'code'\n _order = 'end_date'\n _description = \"Management POS voucher\"\n\n customer_id = fields.Many2one('res.partner', string='Customer', domain=[('customer', '=', True)])\n code = fields.Char('Code')\n start_date = fields.Datetime('Start date', required=1)\n end_date = fields.Datetime('End date', required=1)\n state = fields.Selection([\n ('active', 'Active'),\n ('used', 'Used'),\n ('removed', 'Removed')\n ], string='State', default='active')\n value = fields.Float('Value of voucher')\n apply_type = fields.Selection([\n ('fixed_amount', 'Fixed amount'),\n ('percent', 'Percent (%)'),\n ], string='Apply type', default='fixed_amount')\n method = fields.Selection([\n ('general', 'General'),\n ('special_customer', 'Special Customer'),\n ], string='Method', default='general')\n use_date = fields.Datetime('Use date')\n\n @api.model\n def create(self, vals):\n voucher = super(pos_voucher, self).create(vals)\n if not voucher.code:\n format_code = \"%s%s%s\" % ('999', voucher.id, datetime.now().strftime(\"%d%m%y%H%M\"))\n code = self.env['barcode.nomenclature'].sanitize_ean(format_code)\n voucher.write({'code': code})\n return voucher\n\n @api.multi\n def remove_voucher(self):\n return self.write({\n 'state': 'removed'\n })\n\n @api.model\n def create_voucher(self, vals):\n _logger.info('{create_voucher}: %s' % vals)\n datas_response = []\n today = datetime.today()\n products = self.env['product.product'].search([('name', '=', 'Voucher service')])\n for i in range(0, vals['total_available']):\n customer_id = None\n if vals.get('special_customer', None) == 'special_customer':\n customer_id = vals.get('customer_id', None)\n voucher_vals = {\n 'apply_type': vals.get('apply_type', ''),\n 'value': vals.get('value', 0),\n 'method': vals.get('method'),\n 'customer_id': customer_id,\n 'start_date': fields.Datetime.now(),\n 'end_date': today + timedelta(days=vals['period_days'])\n }\n if products:\n voucher_vals.update({'product_id': products[0].id})\n voucher = 
self.create(voucher_vals)\n format_code = \"%s%s%s\" % ('999', voucher.id, datetime.now().strftime(\"%d%m%y%H%M\"))\n code = self.env['barcode.nomenclature'].sanitize_ean(format_code)\n voucher.write({'code': code})\n if voucher.method == 'special_customer':\n method = 'Special Customer'\n else:\n method = 'General'\n if voucher.apply_type == 'fixed_amount':\n apply_type = 'Fixed Amount'\n else:\n apply_type = 'Percent (%)'\n datas_response.append({\n 'code': code,\n 'partner_name': voucher.customer_id.name if voucher.customer_id else '',\n 'method': method,\n 'apply_type': apply_type,\n 'value': voucher.value,\n 'end_date': voucher.end_date,\n 'id': voucher.id,\n })\n return datas_response\n\n @api.model\n def get_voucher_by_code(self, code):\n vouchers = self.env['pos.voucher'].search(\n [('code', '=', code), ('end_date', '>=', fields.Datetime.now()), ('state', '=', 'active')])\n if not vouchers:\n return -1\n else:\n return vouchers.read([])[0]\n","sub_path":"pos_retail/models/pos/pos_voucher.py","file_name":"pos_voucher.py","file_ext":"py","file_size_in_byte":3820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"438224225","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('coda_mdstore', '0001_initial'),\n ]\n\n operations = [\n migrations.RunSQL(\n 'CREATE FULLTEXT INDEX field_body on coda_mdstore_bag_info(field_body);',\n reverse_sql='DROP INDEX field_body on coda_mdstore_bag_info;',\n )\n ]\n","sub_path":"coda/coda_mdstore/migrations/0002_add_fulltext_index.py","file_name":"0002_add_fulltext_index.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"277532096","text":"import lightgbm\nimport logging\nimport numpy as np\nfrom sklearn.datasets import load_svmlight_file\nimport timeit\n\nlogging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)\n\ndata_filename = '../testdata/higgs_1000examples_test.libsvm'\nmodel_filename ='../testdata/lghiggs.model'\ntrue_pred_filename ='../testdata/lghiggs_1000examples_true_predictions.txt'\n\nlogging.info(f'start loading test data from {data_filename}')\nX, _ = load_svmlight_file(data_filename, zero_based=True)\nlogging.info(f'load test data: {X.shape}')\n\nytrue = np.genfromtxt(true_pred_filename)\nlogging.info(f'load true predictions from {true_pred_filename}')\n\nlogging.info(f'start loading model from {model_filename}')\nlg = lightgbm.Booster(model_file=model_filename, params={'num_threads': 1})\nlogging.info(f'load model: {lg.num_feature()} features')\n\nlogging.info('compare predictions')\nypred = lg.predict(X, raw_score=True, num_threads=4)\n\nif np.allclose(ytrue, ypred):\n logging.info('predictions are valid')\nelse:\n logging.error('!!! 
wrong predictions')\n topn = 10\n for i in range(10):\n logging.error(f'{ytrue[i]} {ypred[i]}')\n\n\nlogging.info('start benchmark')\nm = timeit.repeat('ypred = lg.predict(X, raw_score=True, num_threads=1)', repeat=100, number=1, globals=globals())\nm = np.array(m) * 1000.0\nlogging.info(f'done')\nlogging.info(f'timings (ms): min = {np.min(m):.4f}, mean = {np.mean(m):.4f}, max = {np.max(m):.4f}, std = {np.std(m):.4f}')\n","sub_path":"benchmark/lghiggs.py","file_name":"lghiggs.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"497508305","text":"class Solution(object):\n def spiralOrder(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: List[int]\n \"\"\"\n if not matrix: return []\n return self.printMatrix(matrix, 0, len(matrix) - 1, 0, len(matrix[0]) - 1)\n\n def printMatrix(self, matrix, sr, er, sc, ec):\n if sr > er or sc > ec:\n return []\n res = []\n i = sr\n for j in xrange(sc, ec + 1):\n res.append(matrix[i][j])\n j = ec\n for i in xrange(sr + 1, er + 1):\n res.append(matrix[i][j])\n i = er\n if er > sr: # only scan back when multiple rows\n for j in xrange(ec - 1, sc - 1, -1):\n res.append(matrix[i][j])\n j = sc\n if ec > sc: # only scan back when multiple cols\n for i in xrange(er - 1, sr, -1):\n res.append(matrix[i][j])\n inner = self.printMatrix(matrix, sr + 1, er - 1, sc + 1, ec - 1)\n res.extend(inner)\n return res\n","sub_path":"54_spiral_matrix_I/prac1.py","file_name":"prac1.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"288588918","text":"from slacker import Chat, Slacker\nimport json\n\nDEFAULT_TIMEOUT = 10\nDEFAULT_RETRIES = 0\n\n\nclass SlackerWrapper(Slacker):\n def __init__(self, token, incoming_webhook_url=None,\n timeout=DEFAULT_TIMEOUT, http_proxy=None, https_proxy=None,\n session=None, rate_limit_retries=DEFAULT_RETRIES):\n super().__init__(token, incoming_webhook_url, timeout, http_proxy, https_proxy, session, rate_limit_retries)\n\n proxies = {}\n if http_proxy:\n proxies['http'] = http_proxy\n if https_proxy:\n proxies['https'] = https_proxy\n\n api_args = {\n 'token': token,\n 'timeout': timeout,\n 'proxies': proxies,\n 'session': session,\n 'rate_limit_retries': rate_limit_retries,\n }\n\n self.chat = ChatWrapper(**api_args)\n\n\nclass ChatWrapper(Chat):\n def post_message(self, channel, text=None, username=None, as_user=None,\n parse=None, link_names=None, attachments=None,\n unfurl_links=None, unfurl_media=None, icon_url=None,\n icon_emoji=None, thread_ts=None, reply_broadcast=None):\n\n # Ensure attachments are json encoded\n if attachments:\n if isinstance(attachments, list):\n attachments = json.dumps(attachments)\n\n return self.post('chat.postMessage',\n data={\n 'channel': channel,\n 'text': text,\n 'username': username,\n 'parse': 'full',\n 'attachments': attachments,\n 'unfurl_links': unfurl_links,\n 'unfurl_media': unfurl_media,\n 'icon_url': icon_url,\n 'icon_emoji': icon_emoji,\n 'as_user': True,\n 'link_names': 1,\n 'mrkdwn': True\n })\n\n def me_message(self, channel, text):\n return self.post('chat.meMessage',\n data={'channel': channel, 'text': text})\n\n def reply(self, channel, text=None, thread_ts=None,\n username=None, as_user=None, parse=None, link_names=None,\n attachments=None, unfurl_links=None, unfurl_media=None,\n icon_url=None, icon_emoji=None):\n\n if attachments:\n if isinstance(attachments, list):\n attachments = 
json.dumps(attachments)\n\n return self.post('chat.postMessage',\n data={\n 'channel': channel,\n 'text': text,\n 'username': username,\n 'parse': 'full',\n 'attachments': attachments,\n 'unfurl_links': unfurl_links,\n 'unfurl_media': unfurl_media,\n 'icon_url': icon_url,\n 'thread_ts': thread_ts,\n 'icon_emoji': icon_emoji,\n 'as_user': True,\n 'link_names': 1\n })\n","sub_path":"simple_slack_bot/slacker_wrapper.py","file_name":"slacker_wrapper.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"116235241","text":"import cv2\nimport mediapipe as mp\nimport pyautogui\nimport time\nimport numpy as np\npyautogui.FAILSAFE = False\n\n\nclass HandDetector:\n def __init__(self):\n self.vid = cv2.VideoCapture(0)\n self.mpHands = mp.solutions.hands\n self.hands = self.mpHands.Hands(\n static_image_mode=False,\n max_num_hands=1,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5\n )\n self.mpDraw = mp.solutions.drawing_utils\n self.canvas = np.zeros((int(self.vid.get(4)), int(self.vid.get(3))), np.uint8)\n\n def vid_capture(self):\n ret, frame = self.vid.read()\n frame = cv2.flip(frame, 1)\n imgRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n return frame, imgRGB\n\n def detect_hands(self, frameRGB):\n hands_detected = self.hands.process(frameRGB)\n h, w, c = frameRGB.shape\n index_x = index_y = palm_x = thumb_x = 50 # Initially\n if hands_detected.multi_hand_landmarks:\n for hand in hands_detected.multi_hand_landmarks:\n for i, lm in enumerate(hand.landmark):\n if i == 0:\n palm_x = int(lm.x * w)+45\n elif i == 4:\n thumb_x = int(lm.x * w)+45\n elif i == 8:\n index_x = int(lm.x * w) + 45\n index_y = int(lm.y * h)+25\n return index_x, index_y, palm_x, thumb_x\n\n @staticmethod\n def is_clicking(palm_x, thumb_x, clicked, init_time):\n contraction = (thumb_x - palm_x) if (thumb_x - palm_x) >= 0 else (thumb_x - palm_x) * -1\n if contraction < 100 and not clicked and time.time() - init_time > 3:\n return True\n elif contraction >= 100 and clicked:\n return False\n\n def main(self):\n init_time = time.time()\n while True:\n frame, frameRGB = self.vid_capture()\n try:\n index_x, index_y, palm_x, thumb_x = self.detect_hands(frameRGB)\n except TypeError:\n index_x = index_y = palm_x = thumb_x = 50\n clicked = False\n if self.is_clicking(palm_x, thumb_x, clicked, init_time):\n clicked = True\n self.canvas = cv2.circle(self.canvas, (index_x+45, index_y-25), 3, (255, 255, 255), cv2.FILLED)\n cv2.imshow(\"frame\", frame)\n cv2.imshow(\"canvas\", self.canvas)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\nHandDetector().main()\n","sub_path":"hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"477291313","text":"# %% \n# HW 10 - Richard Marcelain\n# Create a map with 4 layers (incl. 
2 vectors) with a legend\r\n\r\n# %%\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport pandas as pd\r\nimport numpy as np\r\nimport geopandas as gpd\r\nimport fiona\r\nfrom shapely.geometry import Point\r\nimport contextily as ctx\r\n\r\n\r\n# %%\r\n# Gauges II USGS stream gauge dataset:\r\n# Download here:\r\n# https://water.usgs.gov/GIS/metadata/usgswrd/XML/gagesII_Sept2011.xml#stdorder\r\n\r\n# Reading it using geopandas\r\nfile = os.path.join('E://datasets//gagesII_9322_sept30_2011.shp')\r\ngages = gpd.read_file(file)\r\n\r\n# Let's look at what this is \r\ntype(gages)\r\ngages.head()\r\ngages.columns\r\ngages.shape\r\n\r\n# Looking at the geometry now\r\ngages.geom_type\r\n#check our CRS - coordinate reference system \r\ngages.crs\r\n#Check the spatial extent \r\ngages.total_bounds\r\n#NOTE to selves - find out how to get these all at once\r\n\r\n# %% \r\n# Zoom in and just look at AZ\r\ngages.columns\r\ngages.STATE.unique()\r\ngages_az=gages[gages['STATE']=='AZ']\r\ngages_az.shape\r\n\r\n# AZ Gages - color by attribute\r\nfig, ax = plt.subplots(figsize=(5, 5))\r\ngages_az.plot(column='DRAIN_SQKM', categorical=False,\r\n legend=True, markersize=45, cmap='RdBu',\r\n ax=ax)\r\nax.set_title(\"Arizona stream gauge drainage area\n (sq km)\")\r\nplt.show()\r\n\r\n\r\n# %% \r\n# adding more datasets\r\n# https://www.usgs.gov/core-science-systems/ngp/national-hydrography/access-national-hydrography-products\r\n# https://viewer.nationalmap.gov/basic/?basemap=b1&category=nhd&title=NHD%20View\r\n\r\n# Example reading in a geodataframe\r\n# Watershed boundaries for the lower colorado \r\nfile = os.path.join('E://datasets//WBD_15_HU2_GDB.gdb')\r\n\r\nfiona.listlayers(file)\r\nHUC6 = gpd.read_file(file, layer=\"WBDHU6\")\r\n\r\ntype(HUC6)\r\nHUC6.head()\r\n\r\n# plot the new layer we got:\r\nfig, ax = plt.subplots(figsize=(5, 5))\r\nHUC6.plot(ax=ax)\r\nax.set_title(\"HUC Boundaries\")\r\nplt.show()\r\n\r\nHUC6.crs\r\n\r\n\r\n# %%\r\n# Add data points\r\nua_np = np.array([[-110.97688412, 32.22877495]])\r\nverde_np = np.array([[-111.7891667, 34.44833333]])\r\n\r\n# Make these into spatial features\r\nua_geom = [Point(xy) for xy in ua_np]\r\nverde_geom = [Point(xy) for xy in verde_np]\r\nua_geom\r\nverde_geom\r\n\r\n# Make a dataframe of these points\r\nua = gpd.GeoDataFrame(ua_geom, columns= ['geometry'],\r\n crs=HUC6.crs)\r\n\r\nverde = gpd.GeoDataFrame(verde_geom, columns= ['geometry'],\r\n crs=HUC6.crs)\r\n\r\n# Re-project onto map\r\nua_point = ua.to_crs(gages_az.crs)\r\nverde_point = verde.to_crs(gages_az.crs)\r\n\r\n\r\n# %%\r\n# Rivers layer\r\n# https://repository.arizona.edu/handle/10150/188710\r\nfile = os.path.join('E://datasets//az_hydro_routesNAD83.shp')\r\nfiona.listlayers(file)\r\naz_rivers = gpd.read_file(file)\r\naz_proj_rivers = az_rivers.to_crs(gages.crs)\r\n\r\n\r\n# %% \r\n# Project the basins \r\nHUC6_project = HUC6.to_crs(gages_az.crs)\r\n\r\n\r\n# Plot\r\nfig, ax = plt.subplots(figsize=(10, 10))\r\ngages_az.plot(ax=ax, label='All Stream Gages', color='blue', markersize=10,\r\n zorder=3)\r\nHUC6_project.boundary.plot(ax=ax, label='HUC6 Boundary', color=None,\r\n edgecolor='black', linewidth=0.8, zorder=2)\r\naz_proj_rivers.plot(ax=ax, label='Rivers', color='grey', zorder=0)\r\nua_point.plot(ax=ax, label='UA Gage', color='red', edgecolor='white', marker='v',\r\n markersize=150, zorder=4)\r\nverde_point.plot(ax=ax, label='Verde River Gage', color='orange', edgecolor='white', marker='v',\r\n markersize=150, zorder=5)\r\nax.set_title('Arizona Stream Gages')\r\nax.set_xlabel('Northing (m)')\r\nax.set_ylabel('Easting (m)')\r\nctx.add_basemap(ax, crs=gages_az.crs, url=ctx.providers.OpenTopoMap, zorder=1, alpha=0.5)\r\nax.legend()\r\nplt.show()\r\n#fig.savefig(\"map.png\")\r\n# \n\n\n# 
%%\n","sub_path":"marcelain_map.py","file_name":"marcelain_map.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"560191683","text":"# Aaron Newton\r\n# World Series Winners pg.417 7\r\n\r\ndef main():\r\n \r\n infile = open('WorldSeriesWinners.txt', 'r')\r\n winners = {}\r\n start_year = 1903\r\n year_dict = {} \r\n winners = infile.read().splitlines()\r\n infile.close()\r\n enter_team(winners)\r\n enter_year(winners, start_year, year_dict)\r\n\r\ndef enter_team(winners):\r\n \r\n team_name = input('Enter the name of a team: ')\r\n \r\n counter = 0\r\n\r\n for winner in winners:\r\n if team_name.lower() == winner.lower():\r\n counter = counter + 1\r\n \r\n if counter == 1: \r\n print(\"The\", team_name, \"won the World Series\", counter, \"time between 1903 and 2009.\")\r\n elif counter > 1:\r\n print(\"The\", team_name, \"won the World Series\", counter, \"times between 1903 and 2009.\")\r\n else:\r\n print(\"The\", team_name, \"never won the World Series.\")\r\n \r\ndef enter_year(winners, start_year, year_dict):\r\n\r\n for i in range(len(winners)):\r\n team = winners[i].rstrip('\\n')\r\n year = start_year + i\r\n if year >= 1904:\r\n year += 1\r\n if year >= 1994:\r\n year += 1\r\n year_dict[str(year)] = team\r\n \r\n year = input('\\nEnter a year between 1903-2009: ')\r\n \r\n if year == '1904' or year == '1994':\r\n print('The World Series wasnt played during the', year, 'season')\r\n elif year < '1903' or year > '2009':\r\n print('The data for the ', year, ' season is not included')\r\n else:\r\n winner = year_dict[year]\r\n print('The', winner, 'won the World Series in', year)\r\n \r\nmain()\r\n","sub_path":"Code/WorldSeriesWinners.py","file_name":"WorldSeriesWinners.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"140796917","text":"from google.cloud import storage\nimport pandas as pd\nfrom io import StringIO\n\nstorage_client = storage.Client.from_service_account_json('/home/data/secretgc.json')\n\ndf = pd.read_csv('/home/data/test.csv')\nf = StringIO()\ndf.to_csv(f)\nf.seek(0)\n\nbucket = storage_client.bucket('airly_data')\nblob = bucket.blob('docker_test.csv')\n\nblob.upload_from_file(f, content_type='text/csv')","sub_path":"docker_gcp/UploadBlob.py","file_name":"UploadBlob.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"23040066","text":"\"\"\"\nWhere RSA-like cryptography takes place.\nThis program was created to encrypt the keys required for the Code-Challenge and will only work\nwith files formatted like them.\n\"\"\"\n\nimport generator as g\nfrom typing import Tuple\nimport sys\n\n\ndef intKey(key: str) -> Tuple[int, int]:\n \"\"\"Converts a string to tuple of ints\n\n param key: string -- the key from standard input\n\n Returns a tuple of ints if correct format or -1 otherwise\n \"\"\"\n try:\n if key.find(',') != -1:\n newK = key.split(',')\n e = int(newK[0])\n n = int(newK[1])\n k = (e, n)\n return k\n return (-1, -1)\n except:\n raise Exception(\n f\"ERROR: invalid key -- '{sys.argv[2]}' is not valid numeric format.\\nProper format: 11,17\"\n )\n\n\ndef crypto(file, key: Tuple[int, int]) -> None:\n \"\"\"Cryptographic reading and writing of files.\n\n Params:\n file: string -- name of file.\n key: tuple of ints\n\n Returns nothing.\n \"\"\"\n toWrite = \"\"\n with 
open(sys.argv[3], 'r') as f:\n data = f.readlines()\n data = [x.strip() for x in data]\n for line in data:\n member = line.split(' ')\n for value in member:\n if int(value) == 0:\n pass\n if sys.argv[1] == '-e':\n cipher = encrypt(int(value), key)\n toWrite += str(cipher) + \" \"\n elif sys.argv[1] == '-d':\n decipher = decrypt(int(value), key)\n toWrite += str(decipher) + \" \"\n file.write(toWrite.strip())\n file.write('\\n')\n toWrite = \"\"\n\n\ndef workOnFile(fileName: str, key: Tuple[int, int]):\n \"\"\"\n Params:\n fileName: string\n key: tuple of ints\n \"\"\"\n with open(fileName, 'w') as c:\n crypto(c, key)\n\n\ndef encrypt(m: int, PU: Tuple[int, int]) -> int:\n \"\"\"Encryption\n\n Param:\n m: int -- value to encrypt\n PU: tuple of ints -- key\n\n Returns ciphertext\n \"\"\"\n e, n = PU\n cipher = (m**e) % n\n return cipher\n\n\ndef decrypt(cipher: int, PR: Tuple[int, int]) -> int:\n \"\"\"Decryption\n\n Param:\n cipher: int -- value to decrypt\n PR: tuple of ints -- key\n\n Returns message\n \"\"\"\n d, n = PR\n decipher = (cipher**d) % n\n return decipher\n\n\ndef main():\n words = (\"Commands:\\n\"\n \" -g Generate encryption keys and write\\n\"\n \" them to a document of your choosing.\\n\"\n \" e.g. RSA.py -g [file_To_Write_To]\\n\\n\"\n \" -e Encrypt using [key] on [file].\\n\"\n \" e.g. RSA.py -e [file_To_Encrypt]\\n\\n\"\n \" -d decrypt a file with a specified key.\\n\"\n \" e.g. RSA.py -d [file_To_Decrypt]\")\n if len(sys.argv) < 2:\n print(\"General Usage:\\n RSA.py [key] [file]\\n\")\n print(words)\n elif len(sys.argv) == 3:\n if sys.argv[1] == '-g':\n PU, PR = g.gen(8)\n with open(sys.argv[2], 'w+') as f:\n f.write(f\"Public key: {PU}\\nPrivate key: {PR}\")\n else:\n print(f\"ERROR: Invalid Command -- {sys.argv[1]}\\n{words}\")\n\n elif len(sys.argv) == 4:\n key = intKey(sys.argv[2])\n if sys.argv[1] == '-e':\n if key != -1:\n workOnFile('output.txt', key)\n elif sys.argv[1] == '-d':\n if key != -1:\n workOnFile('message.txt', key)\n else:\n print(f\"ERROR: Invalid Command -- {sys.argv[1]}\\n{words}\")\n else:\n print(\n f\"ERROR: expected 2 or 3 arguments received {len(sys.argv) - 1}\\n{words}\"\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"RSA/src/RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":3737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168405970","text":"# Work with vtkVolume objects (voxel datasets) and surfaces.\n#\nfrom vtkplotter import vtkio, utils, Plotter\nfrom vtkplotter.actors import Volume\n\nvp = Plotter()\n\n# Load a 3D voxel dataset (returns a vtkImageData object):\nimg = vtkio.loadImageData('data/embryo.slc', spacing=[1,1,1])\n\n# Build a vtkVolume object. 
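The comment continued below explains that the list of alphas passed to `Volume` defines an opacity transfer function over the scalar range of the voxel data. Here is a small numpy sketch of that idea, treating the alphas as evenly spaced control points and interpolating between them; it illustrates the concept under that assumption and is not vtkplotter's actual implementation.

```python
import numpy as np

def opacity(scalar, alphas, smin=0.0, smax=255.0):
    # Treat the alphas as evenly spaced control points over [smin, smax]
    # and linearly interpolate to get the opacity of any scalar value.
    xp = np.linspace(smin, smax, num=len(alphas))
    return np.interp(scalar, xp, alphas)

alphas = [0, 0.4, 0.9, 1]
print(opacity(0, alphas))    # 0.0  -> fully transparent at the low end
print(opacity(128, alphas))  # ~0.65 in the middle of the range
print(opacity(255, alphas))  # 1.0  -> fully opaque at the top
```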
\n# A set of transparency values - of any length - can be passed\n# to define the opacity transfer function in the range of the scalar.\n# E.g.: setting alphas=[0, 0, 0, 1, 0, 0, 0] would make visible\n# only voxels with value close to 98.5 (see print output).\nvol = Volume(img, c='green', alphas=[0, 0.4, 0.9, 1]) \n\nsph = vp.sphere(pos=[100,100,100], r=20) # add a dummy surface\n\nvp.show([vol, sph], zoom=1.4) # show both vtkVolume and vtkActor\n\n\n\n\n","sub_path":"examples/volumetric/readVolume.py","file_name":"readVolume.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"139053157","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Members of the ML-KA group'\nSITENAME = u'Machine Learning - Karlsruhe'\nSITEURL = '//ml-ka.de'\n\nPATH = 'content'\n\nTIMEZONE = 'Europe/Berlin'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('GitHub/ML-KA', 'https://github.com/ML-KA/'),)\n\n# Social widget\nSOCIAL = (('Facebook', 'https://www.facebook.com/mlkarlsruhe'),\n # ('Another social link', '#'),\n )\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n# RELATIVE_URLS = True\n\nTHEME = 'pelican-bootstrap3'\n\nARTICLE_URL = '{slug}/'\nARTICLE_SAVE_AS = '{slug}/index.html'\nPAGE_URL = 'pages/{slug}/'\nPAGE_SAVE_AS = 'pages/{slug}/index.html'\nCATEGORY_URL = 'category/{slug}/'\nCATEGORY_SAVE_AS = 'category/{slug}/index.html'\nTAG_URL = 'tag/{slug}/'\nTAG_SAVE_AS = 'tag/{slug}/index.html'\n\nPLUGIN_PATHS = ['./pelican-bootstrapify',\n './pelican_plugin-render_math',\n './simple_footnotes',\n './pelican-toc']\nPLUGINS = ['bootstrapify',\n 'pelican_plugin-render_math',\n 'simple_footnotes',\n 'toc']\n\nSTATIC_PATHS = ['images', 'extra/CNAME']\nEXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'}, }\nSUMMARY_MAX_LENGTH = 0","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"43631349","text":"import Constants\nimport numpy as np\nimport os.path\nimport os, pickle, yaml\nfrom skimage import io\n\ndef MakeTest_pose_yaml(dictionary, keys2save, saveasfile):\n dict_test = {}\n for key in keys2save:\n dict_test[key] = dictionary[key]\n\n dict_test['scoremap_dir'] = 'test'\n with open(saveasfile, \"w\") as f:\n yaml.dump(dict_test, f)\n\ndef MakeTrain_pose_yaml(itemstochange,saveasconfigfile,defaultconfigfile):\n raw = open(defaultconfigfile).read()\n docs = []\n for raw_doc in raw.split('\\n---'):\n try:\n docs.append(yaml.load(raw_doc,Loader=yaml.SafeLoader))\n except SyntaxError:\n docs.append(raw_doc)\n\n for key in itemstochange.keys():\n docs[0][key] = itemstochange[key]\n\n with open(saveasconfigfile, \"w\") as f:\n yaml.dump(docs[0], f)\n return docs[0]\n\n\ndef SaveMetadata(metadatafilename, data, trainIndexes, testIndexes, trainFraction):\n with open(metadatafilename, 'wb') as f:\n # Pickle the 'labeled-data' dictionary using the highest protocol available.\n pickle.dump([data, trainIndexes, testIndexes, trainFraction], f, pickle.HIGHEST_PROTOCOL)\n\ndef _read_image_shape_fast(path):\n return io.imread(path).shape\n\ndef format_training_data(df, 
train_inds, nbodyparts):\n train_data = []\n matlab_data = []\n\n def to_matlab_cell(array):\n outer = np.array([[None]], dtype=object)\n outer[0, 0] = array.astype('int64')\n return outer\n\n for i in train_inds:\n data = dict()\n filename = df.index[i]\n data['image'] = filename\n img_shape = _read_image_shape_fast(os.path.join(Constants.projectDirectory, filename))\n try:\n data['size'] = img_shape[2], img_shape[0], img_shape[1]\n except IndexError:\n data['size'] = 1, img_shape[0], img_shape[1]\n temp = df.iloc[i].values.reshape(-1, 2)\n joints = np.c_[range(nbodyparts), temp]\n joints = joints[~np.isnan(joints).any(axis=1)].astype(int)\n # Check that points lie within the image\n inside = np.logical_and(np.logical_and(joints[:, 1] < img_shape[1], joints[:, 1] > 0),\n np.logical_and(joints[:, 2] < img_shape[0], joints[:, 2] > 0))\n if not all(inside):\n joints = joints[inside]\n if joints.size: # Exclude images without labels\n data['joints'] = joints\n train_data.append(data)\n matlab_data.append((np.array([data['image']], dtype='U'),\n np.array([data['size']]),\n to_matlab_cell(data['joints'])))\n matlab_data = np.asarray(matlab_data, dtype=[('image', 'O'), ('size', 'O'), ('joints', 'O')])\n return train_data, matlab_data\n\ndef SplitTrials(trialindex, trainFraction=0.8):\n ''' Split a trial index into train and test sets. Also checks that the trainFraction is a two digit number between 0 an 1. The reason\n is that the folders contain the trainfraction as int(100*trainFraction). '''\n if trainFraction>1 or trainFraction<0:\n print(\"The training fraction should be a two digit number between 0 and 1; i.e. 0.95. Please change accordingly.\")\n return ([],[])\n\n if abs(trainFraction-round(trainFraction,2))>0:\n print(\"The training fraction should be a two digit number between 0 and 1; i.e. 0.95. Please change accordingly.\")\n return ([],[])\n else:\n trainsetsize = int(len(trialindex) * round(trainFraction,2))\n shuffle = np.random.permutation(trialindex)\n testIndexes = shuffle[trainsetsize:]\n trainIndexes = shuffle[:trainsetsize]\n\n return (trainIndexes, testIndexes)","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"257029967","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n\ntest_general.py\n\nCreated by José Sánchez-Gallego on 7 Apr 2016.\nLicensed under a 3-clause BSD license.\n\nRevision history:\n 7 Apr 2016 J. 
Sánchez-Gallego\n Initial version\n\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import OrderedDict\nimport unittest\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nimport os\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nimport marvin\nfrom marvin.core.core import DotableCaseInsensitive\nfrom marvin.core.exceptions import MarvinError\nfrom marvin.tests import TemplateTestCase, Call, template\nfrom marvin.utils.general import convertCoords, get_nsa_data, getWCSFromPng, get_plot_params\nfrom marvin.tests import MarvinTest\nfrom marvin.utils.dap.datamodel import get_default_plot_params\n\nclass TestConvertCoords(MarvinTest):\n\n __metaclass__ = TemplateTestCase\n\n @classmethod\n def setUpClass(cls):\n super(TestConvertCoords, cls).setUpClass()\n outver = 'v1_5_1'\n filename = os.path.join(cls.mangaredux, outver, str(cls.plate), 'stack', cls.cubename)\n cls.testHeader = fits.getheader(filename, 1)\n cls.testWcs = WCS(cls.testHeader)\n cls.testShape = fits.getdata(filename, 1).shape[1:]\n\n def test_pix_center(self):\n \"\"\"Tests mode='pix', xyorig='center'.\"\"\"\n\n coords = [[0, 0],\n [5, 3],\n [-5, 1],\n [1, -5],\n [10, 10],\n [-10, -10],\n [1.5, 2.5],\n [0.4, 0.25]]\n\n expected = [[17, 17],\n [20, 22],\n [18, 12],\n [12, 18],\n [27, 27],\n [7, 7],\n [20, 18],\n [17, 17]]\n\n cubeCoords = convertCoords(coords, mode='pix', shape=self.testShape)\n assert_allclose(cubeCoords, np.array(expected))\n\n def test_pix_lower(self):\n \"\"\"Tests mode='pix', xyorig='lower'.\"\"\"\n\n coords = [[0, 0],\n [5, 3],\n [10, 10],\n [1.5, 2.5],\n [0.4, 0.25]]\n\n expected = [[0, 0],\n [3, 5],\n [10, 10],\n [2, 2],\n [0, 0]]\n\n cubeCoords = convertCoords(coords, mode='pix', shape=self.testShape,\n xyorig='lower')\n assert_allclose(cubeCoords, np.array(expected))\n\n def test_sky(self):\n \"\"\"Tests mode='sky'.\"\"\"\n\n coords = np.array([[232.5447, 48.690201],\n [232.54259, 48.688948],\n [232.54135, 48.692415],\n [232.54285, 48.692372]])\n\n expected = [[17, 17],\n [8, 27],\n [33, 33],\n [33, 26]]\n\n cubeCoords = convertCoords(coords, mode='sky', wcs=self.testWcs)\n\n assert_allclose(cubeCoords, np.array(expected))\n\n # This allows doing multiple calls to the same test.\n _outside_calls = {\n 'pix_center_-50_0': Call(\n {'coords': [[-50, 0]], 'mode': 'pix', 'xyorig': 'center'}, []),\n 'pix_center_50_50': Call(\n {'coords': [[50, 50]], 'mode': 'pix', 'xyorig': 'center'}, []),\n 'pix_lower_-50_0': Call(\n {'coords': [[-50, 0]], 'mode': 'pix', 'xyorig': 'lower'}, []),\n 'pix_lower_50_50': Call(\n {'coords': [[50, 50]], 'mode': 'pix', 'xyorig': 'lower'}, []),\n 'pix_sky_230_48': Call({'coords': [[230, 48]], 'mode': 'sky'}, []),\n 'pix_center_233_48': Call({'coords': [[233, 48]], 'mode': 'sky'}, [])\n }\n\n @template(_outside_calls)\n def _test_outside(self, kwargs, expected):\n\n mode = kwargs.get('mode')\n if mode == 'sky':\n kwargs['wcs'] = self.testWcs\n\n with self.assertRaises(MarvinError) as cm:\n convertCoords(shape=self.testShape, **kwargs)\n self.assertIn('some indices are out of limits', str(cm.exception))\n\n\nclass TestGetNSAData(MarvinTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestGetNSAData, cls).setUpClass()\n\n def setUp(self):\n self.set_sasurl('local')\n marvin.config.forceDbOn()\n\n def _test_nsa(self, data):\n self.assertIsInstance(data, DotableCaseInsensitive)\n self.assertIn('profmean_ivar', data.keys())\n self.assertEqual(data['profmean_ivar'][0][0], 18.5536117553711)\n\n def 
_test_drpall(self, data):\n self.assertIsInstance(data, DotableCaseInsensitive)\n self.assertNotIn('profmean_ivar', data.keys())\n self.assertIn('iauname', data.keys())\n self.assertEqual(data['iauname'], 'J153010.73+484124.8')\n\n def test_local_nsa(self):\n data = get_nsa_data(self.mangaid, source='nsa', mode='local')\n self._test_nsa(data)\n\n def test_local_drpall(self):\n data = get_nsa_data(self.mangaid, source='drpall', mode='local')\n self._test_drpall(data)\n\n def test_remote_nsa(self):\n data = get_nsa_data(self.mangaid, source='nsa', mode='remote')\n self._test_nsa(data)\n\n def test_remote_drpall(self):\n data = get_nsa_data(self.mangaid, source='drpall', mode='remote')\n self._test_drpall(data)\n\n def test_auto_nsa_with_db(self):\n data = get_nsa_data(self.mangaid, source='nsa', mode='auto')\n self._test_nsa(data)\n\n def test_auto_drpall_with_drpall(self):\n data = get_nsa_data(self.mangaid, source='drpall', mode='auto')\n self._test_drpall(data)\n\n def test_auto_nsa_without_db(self):\n marvin.config.forceDbOff()\n data = get_nsa_data(self.mangaid, source='nsa', mode='auto')\n self._test_nsa(data)\n\n def test_auto_drpall_without_drpall(self):\n marvin.config._drpall = None\n data = get_nsa_data(self.mangaid, source='drpall', mode='auto')\n self._test_drpall(data)\n\n def test_hybrid_properties_populated(self):\n data = get_nsa_data(self.mangaid, source='nsa', mode='local')\n self.assertIn('elpetro_mag_g', data)\n\n def test_nsa_dotable(self):\n data = get_nsa_data(self.mangaid, source='nsa', mode='local')\n self.assertEqual(data['elpetro_mag_g'], data.elpetro_mag_g)\n\n def test_drpall_dotable(self):\n data = get_nsa_data(self.mangaid, source='drpall', mode='local')\n self.assertEqual(data['iauname'], data.iauname)\n\n def test_nsa_old_target_selection(self):\n data = get_nsa_data('1-178482', source='nsa', mode='local')\n self.assertAlmostEqual(data['sersic_flux_ivar'][0], 1.33179831504822)\n\n def test_nsa_12(self):\n data = get_nsa_data('12-84679', source='nsa', mode='local')\n self.assertAlmostEqual(data['sersic_flux_ivar'][0], 0.127634227275848)\n\n\nclass TestPillowImage(MarvinTest):\n\n @classmethod\n def setUpClass(cls):\n super(TestPillowImage, cls).setUpClass()\n outver = 'v1_5_1'\n cls.filename = os.path.join(cls.mangaredux, outver, str(cls.plate), 'stack/images', cls.imgname)\n\n def test_image_has_wcs(self):\n w = getWCSFromPng(self.filename)\n self.assertEqual(type(w), WCS)\n\n def test_use_pil(self):\n try:\n import PIL\n except ImportError as e:\n with self.assertRaises(ImportError):\n err = 'No module named PIL'\n self.assertEqual(err, e.args[0])\n\nclass TestDataModelPlotParams(MarvinTest):\n \n def bitmasks(self):\n return {'1.1.1': {'badData': {'doNotUse': 0}},\n '2.0.2': {'nocov': 0,\n 'badData': {'unreliable': 5,\n 'doNotUse': 30}\n }\n }\n\n def test_get_plot_params_default_mpl4(self):\n desired = {'bitmasks': self.bitmasks()['1.1.1'],\n 'cmap': 'linearlab',\n 'percentile_clip': [5, 95],\n 'symmetric': False,\n 'snr_min': 1}\n actual = get_plot_params(dapver='1.1.1', prop='emline_gflux')\n self.assertDictEqual(actual, desired)\n\n def test_get_plot_params_default_mpl5(self):\n desired = {'bitmasks': self.bitmasks()['2.0.2'],\n 'cmap': 'linearlab',\n 'percentile_clip': [5, 95],\n 'symmetric': False,\n 'snr_min': 1}\n actual = get_plot_params(dapver='2.0.2', prop='emline_gflux')\n self.assertDictEqual(actual, desired)\n\n def test_get_plot_params_vel_mpl4(self):\n desired = {'bitmasks': self.bitmasks()['1.1.1'],\n 'cmap': 'RdBu_r',\n 'percentile_clip': 
[10, 90],\n 'symmetric': True,\n 'snr_min': None}\n actual = get_plot_params(dapver='1.1.1', prop='stellar_vel')\n self.assertDictEqual(actual, desired)\n \n def test_get_plot_params_vel_mpl5(self):\n desired = {'bitmasks': self.bitmasks()['2.0.2'],\n 'cmap': 'RdBu_r',\n 'percentile_clip': [10, 90],\n 'symmetric': True,\n 'snr_min': None}\n actual = get_plot_params(dapver='2.0.2', prop='stellar_vel')\n self.assertDictEqual(actual, desired)\n \n def test_get_plot_params_sigma_mpl4(self):\n desired = {'bitmasks': self.bitmasks()['1.1.1'],\n 'cmap': 'inferno',\n 'percentile_clip': [10, 90],\n 'symmetric': False,\n 'snr_min': 1}\n actual = get_plot_params(dapver='1.1.1', prop='stellar_sigma')\n self.assertDictEqual(actual, desired)\n\n def test_get_plot_params_sigma_mpl5(self):\n desired = {'bitmasks': self.bitmasks()['2.0.2'],\n 'cmap': 'inferno',\n 'percentile_clip': [10, 90],\n 'symmetric': False,\n 'snr_min': 1}\n actual = get_plot_params(dapver='2.0.2', prop='stellar_sigma')\n self.assertDictEqual(actual, desired)\n\n\nif __name__ == '__main__':\n verbosity = 2\n unittest.main(verbosity=verbosity)\n","sub_path":"python/marvin/tests/utils/test_general.py","file_name":"test_general.py","file_ext":"py","file_size_in_byte":10136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546965375","text":"__author__ = 'GunMade'\n\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread(r'image\\flower.jpg', 0)\nequ = cv2.equalizeHist(img)\nres = np.hstack((img, equ))\ncv2.imshow('equalization', res)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"OpenCV_practice/equalization.py","file_name":"equalization.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"245788913","text":"'''\nCreated on Dec 5, 2012\n\n@author: Mason\n'''\nfrom django.conf.urls import patterns, include, url\n\nurlpatterns = patterns('images.views',\n url(r'^(?P\d+)/(?P\d+)$', 'index'),\n url(r'^(?P\d+)/addTag/$', 'addTag'),\n url(r'^popular/(?P.*)/$', 'popularImage'),\n)","sub_path":"images/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"505915778","text":"# Write a program that asks for the radius of a circle, then calculates and displays its area.\n# Ivo Dias\n\n# Import the math library\nimport math\n\n# Read the radius\nraio = int(input(\"Enter the radius: \"))\n\n# Compute the area\narea = math.pi*raio**2\n\n# Display the result\nprint(\"The area is\", area)","sub_path":"PythonBrasil/EstruturaSequencial/Area.py","file_name":"Area.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"559740679","text":"\"\"\" mymath - example of module \"\"\"\n\npi = 3.14159\ndef area(r):\n \"\"\" area(r): return the area of a circle with radius r. \"\"\"\n global pi\n return (pi*r*r)\n\nd = {}\nd[0] = (1,2)\nd.update({1:2})\n\nprint(d)","sub_path":"concepts/modules/mymath.py","file_name":"mymath.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"260931922","text":"\"\"\"toolinterface URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom display.views import homepage, display_packets, edit_packet, save,\\\n delete_packet, upload_trace, test, replay, sendpackets, delete_trace\n\nurlpatterns = [\n url(r'test/', test),\n url(r'home/', homepage),\n url(r'upload_trace', upload_trace),\n url(r'delete_trace', delete_trace),\n url(r'display/', display_packets),\n url(r'edit_packet/', edit_packet),\n url(r'save/', save),\n url(r'delete_packet/', delete_packet),\n url(r'replay/', replay),\n url(r'sendpackets/', sendpackets),\n url(r'^admin/', admin.site.urls),\n]\n","sub_path":"toolgui/toolinterface/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"101056505","text":"#!/usr/bin/python\n\n# You need the plant files in the following format to work\n# PLANTNAME_YEAR_MM.csv such as GOGEM_2018_01.csv\n# Make sure to use 2 digits for month values.\n\n# Solarian - 2019\n\n\nimport glob, os, sys\nimport pandas as pd\n\ndef main(workingpath):\n # Get file names and prepare for sheet naming\n #path = \"/Users/OrcunBaslak/Desktop/TurkeySPP/\"\n path = workingpath\n print(\"Working Directory: \"+workingpath)\n filelist = glob.glob(path + \"*.csv\")\n plantnames = []\n for filename in sorted(filelist):\n (_, f_name) = os.path.split(filename)\n (file_name, _) = os.path.splitext(f_name) \n file_name = file_name.split(sep=\"_\") \n plantnames.append(file_name[0])\n plantnames = list(set(plantnames))\n\n if len(plantnames) == 0:\n print(\"Error: No CSV files to parse\")\n return\n\n # Get CSV files and combine them into dataframes. Then write dataframes to excel sheets\n for plantname in sorted(plantnames):\n filelist = glob.glob(path + plantname + \"_*.csv\")\n all_plant_rows = pd.DataFrame()\n for filename in sorted(filelist):\n df_csv = pd.read_csv(filename, sep=\";\", header=0, decimal=\".\", parse_dates=True)\n df_csv = df_csv.drop(labels=df_csv.columns[len(df_csv.columns)-1], axis=1)\n # Fix for empty values\n df_csv.replace({'-': '0'}, regex=True, inplace=True)\n cols=[i for i in df_csv.columns]\n cols.pop(0)\n all_plant_rows = pd.concat([all_plant_rows, df_csv], sort=False)\n \n #Get rid of the junk in column names\n standardize_headers(all_plant_rows)\n\n #Put your code here to process the dataframe\n \n print(\"Processing: \"+plantname+\" ... 
DONE!\")\n\ndef standardize_headers(df):\n new_column_names = []\n new_column_names.append(df.columns[0])\n for idx, header in enumerate(df.columns):\n if idx > 0:\n new_column_names.append(header.split(\"/\")[0])\n \n df.columns = new_column_names\n\n\nif __name__ == \"__main__\":\n #Check for args\n if len(sys.argv) > 1:\n main(sys.argv[1]) \n else:\n main(os.getcwd())\n","sub_path":"solarian_sma_processor.py","file_name":"solarian_sma_processor.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"247607935","text":"import numpy as np\n\n# Bubble sorting algorithm implementation. Returns sorted array.\ndef bubble_sort(arr):\n for i in range(len(arr)):\n for j in range(len(arr) - 1):\n if arr[j] > arr[j + 1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n return arr\n\n# Test array\narr = np.random.randint(0,100,20)\nresult = bubble_sort(arr.copy())\n# Sorted array for validation\narr.sort()\nprint(f\"Bubble sort result: {result}\")\nprint(f\"Sorted array: {arr}\")","sub_path":"bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"65902486","text":"#!/usr/bin/env python\n# Copyright 2015 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport os\nimport shutil\nimport socket\nimport subprocess\nimport threading\nimport time\nimport shlex\nimport sys\n\nimport config as nodepool_config\nimport exceptions\nimport provider_manager\nimport stats\nimport zk\n\n\nMINS = 60\nHOURS = 60 * MINS\nIMAGE_TIMEOUT = 6 * HOURS # How long to wait for an image save\nSUSPEND_WAIT_TIME = 30 # How long to wait between checks for\n # ZooKeeper connectivity if it disappears.\n\n# HP Cloud requires qemu compat with 0.10. That version works elsewhere,\n# so just hardcode it for all qcow2 building\nDEFAULT_QEMU_IMAGE_COMPAT_OPTIONS = \"--qemu-img-options 'compat=0.10'\"\n\n\nclass DibImageFile(object):\n '''\n Class used as an API for finding locally built DIB image files, and\n also used to represent the found files. 
Image files are named using\n a unique ID, but can be available in multiple formats (with different\n extensions).\n '''\n def __init__(self, image_id, extension=None):\n self.image_id = image_id\n self.extension = extension\n self.md5 = None\n self.md5_file = None\n self.sha256 = None\n self.sha256_file = None\n\n @staticmethod\n def from_path(path):\n image_file = os.path.basename(path)\n image_id, extension = image_file.rsplit('.', 1)\n return DibImageFile(image_id, extension)\n\n @staticmethod\n def from_image_id(images_dir, image_id):\n images = []\n for image_filename in os.listdir(images_dir):\n if os.path.isfile(os.path.join(images_dir, image_filename)):\n image = DibImageFile.from_path(image_filename)\n if image.image_id == image_id:\n images.append(image)\n return images\n\n @staticmethod\n def from_images_dir(images_dir):\n return [DibImageFile.from_path(x) for x in os.listdir(images_dir)]\n\n def to_path(self, images_dir, with_extension=True):\n my_path = os.path.join(images_dir, self.image_id)\n if with_extension:\n if self.extension is None:\n raise exceptions.BuilderError(\n 'Cannot specify image extension of None'\n )\n my_path += '.' + self.extension\n\n md5_path = '%s.%s' % (my_path, 'md5')\n md5 = self._checksum(md5_path)\n if md5:\n self.md5_file = md5_path\n self.md5 = md5[0:32]\n\n sha256_path = '%s.%s' % (my_path, 'sha256')\n sha256 = self._checksum(sha256_path)\n if sha256:\n self.sha256_file = sha256_path\n self.sha256 = sha256[0:64]\n\n return my_path\n\n def _checksum(self, filename):\n if not os.path.isfile(filename):\n return None\n with open(filename, 'r') as f:\n data = f.read()\n return data\n\n\nclass BaseWorker(threading.Thread):\n def __init__(self, config_path, interval, zk):\n super(BaseWorker, self).__init__()\n self.log = logging.getLogger(\"nodepool.builder.BaseWorker\")\n self.daemon = True\n self._running = False\n self._config = None\n self._config_path = config_path\n self._zk = zk\n self._hostname = socket.gethostname()\n self._statsd = stats.get_client()\n self._interval = interval\n\n def _checkForZooKeeperChanges(self, new_config):\n '''\n Check config for ZooKeeper cluster changes.\n\n If the defined set of ZooKeeper servers changes, the connection\n will use the new server set.\n '''\n if self._config.zookeeper_servers != new_config.zookeeper_servers:\n self.log.debug(\"Detected ZooKeeper server changes\")\n self._zk.resetHosts(new_config.zookeeper_servers.values())\n\n @property\n def running(self):\n return self._running\n\n def shutdown(self):\n self._running = False\n\n\nclass CleanupWorker(BaseWorker):\n '''\n The janitor of nodepool-builder that will remove images from providers\n and any local DIB builds.\n '''\n\n def __init__(self, name, config_path, interval, zk):\n super(CleanupWorker, self).__init__(config_path, interval, zk)\n self.log = logging.getLogger(\"nodepool.builder.CleanupWorker.%s\" % name)\n self.name = 'CleanupWorker.%s' % name\n\n def _buildUploadRecencyTable(self):\n '''\n Builds a table for each image of the most recent uploads to each\n provider.\n\n Example)\n\n image1:\n providerA: [ (build_id, upload_id, upload_time), ... ]\n providerB: [ (build_id, upload_id, upload_time), ... ]\n image2:\n providerC: [ (build_id, upload_id, upload_time), ... 
]\n '''\n self._rtable = {}\n for image in self._zk.getImageNames():\n self._rtable[image] = {}\n for build in self._zk.getBuilds(image, zk.READY):\n for provider in self._zk.getBuildProviders(image, build.id):\n if provider not in self._rtable[image]:\n self._rtable[image][provider] = []\n uploads = self._zk.getMostRecentBuildImageUploads(\n 2, image, build.id, provider, zk.READY)\n for upload in uploads:\n self._rtable[image][provider].append(\n (build.id, upload.id, upload.state_time)\n )\n\n # Sort uploads by state_time (upload time) and keep the 2 most recent\n for i in self._rtable.keys():\n for p in self._rtable[i].keys():\n self._rtable[i][p].sort(key=lambda x: x[2], reverse=True)\n self._rtable[i][p] = self._rtable[i][p][:2]\n\n def _isRecentUpload(self, image, provider, build_id, upload_id):\n '''\n Search for an upload for a build within the recency table.\n '''\n provider = self._rtable[image].get(provider)\n if not provider:\n return False\n\n for b_id, u_id, u_time in provider:\n if build_id == b_id and upload_id == u_id:\n return True\n return False\n\n def _inProgressUpload(self, upload):\n '''\n Determine if an upload is in progress.\n '''\n if upload.state != zk.UPLOADING:\n return False\n\n try:\n with self._zk.imageUploadLock(upload.image_name, upload.build_id,\n upload.provider_name,\n blocking=False):\n pass\n except exceptions.ZKLockException:\n return True\n return False\n\n def _removeDibItem(self, filename):\n if filename is None:\n return\n try:\n os.remove(filename)\n self.log.info(\"Removed DIB file %s\" % filename)\n except OSError as e:\n if e.errno != 2: # No such file or directory\n raise e\n\n def _deleteLocalBuild(self, image, build_id, builder):\n '''\n Remove expired image build from local disk.\n\n :param str image: Name of the image whose build we are deleting.\n :param str build_id: ID of the build we want to delete.\n :param str builder: hostname of the build.\n\n :returns: True if files were deleted, False if none were found.\n '''\n base = \"-\".join([image, build_id])\n files = DibImageFile.from_image_id(self._config.imagesdir, base)\n if not files:\n # NOTE(pabelanger): It is possible we don't have any files because\n # diskimage-builder failed. 
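The recency table built by `_buildUploadRecencyTable` above keeps, per image and provider, only the two most recent uploads, which the cleanup pass then treats as protected. A stripped-down sketch of that sort-and-truncate step on plain tuples; the sample records here are invented for illustration.

```python
from operator import itemgetter

# Records are (build_id, upload_id, upload_time) tuples, as in the table above.
table = {
    'image1': {
        'providerA': [('b1', 'u1', 100), ('b2', 'u2', 300), ('b1', 'u3', 200)],
    },
}

for image in table:
    for provider in table[image]:
        # Sort newest-first by upload time, then keep only the two most recent.
        table[image][provider].sort(key=itemgetter(2), reverse=True)
        table[image][provider] = table[image][provider][:2]

print(table['image1']['providerA'])  # [('b2', 'u2', 300), ('b1', 'u3', 200)]
```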
So, check to see if we have the correct\n # builder so we can remove the data from zookeeper.\n if builder == self._hostname:\n return True\n return False\n\n self.log.info(\"Doing cleanup for %s:%s\" % (image, build_id))\n\n manifest_dir = None\n\n for f in files:\n filename = f.to_path(self._config.imagesdir, True)\n if not manifest_dir:\n path, ext = filename.rsplit('.', 1)\n manifest_dir = path + \".d\"\n map(self._removeDibItem, [filename, f.md5_file, f.sha256_file])\n\n try:\n shutil.rmtree(manifest_dir)\n self.log.info(\"Removed DIB manifest %s\" % manifest_dir)\n except OSError as e:\n if e.errno != 2: # No such file or directory\n raise e\n\n return True\n\n def _cleanupProvider(self, provider, image, build_id):\n all_uploads = self._zk.getUploads(image, build_id, provider.name)\n\n for upload in all_uploads:\n if self._isRecentUpload(image, provider.name, build_id, upload.id):\n continue\n self._deleteUpload(upload)\n\n def _cleanupObsoleteProviderUploads(self, provider, image, build_id):\n image_names_for_provider = provider.images.keys()\n if image in image_names_for_provider:\n # This image is in use for this provider\n return\n\n all_uploads = self._zk.getUploads(image, build_id, provider.name)\n for upload in all_uploads:\n self._deleteUpload(upload)\n\n def _deleteUpload(self, upload):\n deleted = False\n\n if upload.state != zk.DELETING:\n if not self._inProgressUpload(upload):\n data = zk.ImageUpload()\n data.state = zk.DELETING\n self._zk.storeImageUpload(upload.image_name, upload.build_id,\n upload.provider_name, data,\n upload.id)\n deleted = True\n\n if upload.state == zk.DELETING or deleted:\n manager = self._config.provider_managers[upload.provider_name]\n try:\n # It is possible we got this far, but don't actually have an\n # external_name. This could mean that zookeeper and cloud\n # provider are somehow out of sync.\n if upload.external_name:\n base = \"-\".join([upload.image_name, upload.build_id])\n self.log.info(\"Deleting image build %s from %s\" %\n (base, upload.provider_name))\n manager.deleteImage(upload.external_name)\n except Exception:\n self.log.exception(\n \"Unable to delete image %s from %s:\",\n upload.external_name, upload.provider_name)\n else:\n self._zk.deleteUpload(upload.image_name, upload.build_id,\n upload.provider_name, upload.id)\n\n def _inProgressBuild(self, build, image):\n '''\n Determine if a DIB build is in progress.\n '''\n if build.state != zk.BUILDING:\n return False\n\n try:\n with self._zk.imageBuildLock(image, blocking=False):\n # An additional state check is needed to make sure it hasn't\n # changed on us. 
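`_inProgressUpload` and `_inProgressBuild` both rely on the same idiom: try to take the coordination lock without blocking, and treat a failed acquisition as evidence that another worker is still busy with the item. A minimal sketch of the idiom, with a plain `threading.Lock` standing in for the ZooKeeper lock used here:

```python
import threading

def in_progress(lock):
    # Probe the lock without blocking; failure means someone holds it.
    acquired = lock.acquire(blocking=False)
    if not acquired:
        return True   # another worker holds it -> work is in progress
    lock.release()    # we got it, so nobody was working; release the probe
    return False

job_lock = threading.Lock()
print(in_progress(job_lock))  # False: nobody holds the lock yet
job_lock.acquire()
print(in_progress(job_lock))  # True: the lock is held elsewhere
```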
If it has, then let's pretend a build is\n # still in progress so that it is checked again later with\n # its new build state.\n b = self._zk.getBuild(image, build.id)\n if b.state != zk.BUILDING:\n return True\n pass\n except exceptions.ZKLockException:\n return True\n return False\n\n def _cleanup(self):\n '''\n Clean up builds on disk and in providers.\n '''\n known_providers = self._config.providers.values()\n image_names = self._zk.getImageNames()\n\n self._buildUploadRecencyTable()\n\n for image in image_names:\n try:\n self._cleanupImage(known_providers, image)\n except Exception:\n self.log.exception(\"Exception cleaning up image %s:\", image)\n\n def _filterLocalBuilds(self, image, builds):\n '''Return the subset of builds that are local'''\n ret = []\n for build in builds:\n base = \"-\".join([image, build.id])\n files = DibImageFile.from_image_id(self._config.imagesdir, base)\n if files:\n ret.append(build)\n return ret\n\n def _cleanupCurrentProviderUploads(self, provider, image, build_id):\n '''\n Remove cruft from a current build.\n\n Current builds (the ones we want to keep) are treated special since\n we want to remove any ZK nodes for uploads that failed exceptionally\n hard (i.e., we could not set the state to FAILED and they remain as\n UPLOADING), and we also want to remove any uploads that have been\n marked for deleting.\n '''\n cruft = self._zk.getUploads(image, build_id, provider,\n states=[zk.UPLOADING, zk.DELETING])\n for upload in cruft:\n if (upload.state == zk.UPLOADING and\n not self._inProgressUpload(upload)\n ):\n self.log.info(\"Removing failed upload record: %s\" % upload)\n self._zk.deleteUpload(image, build_id, provider, upload.id)\n elif upload.state == zk.DELETING:\n self.log.info(\"Removing deleted upload and record: %s\" % upload)\n self._deleteUpload(upload)\n\n def _cleanupImage(self, known_providers, image):\n '''\n Clean up one image.\n '''\n # Get the list of all builds, then work from that so that we\n # have a consistent view of the data.\n all_builds = self._zk.getBuilds(image)\n builds_to_keep = set([b for b in sorted(all_builds, reverse=True,\n key=lambda y: y.state_time)\n if b.state==zk.READY][:2])\n local_builds = set(self._filterLocalBuilds(image, all_builds))\n diskimage = self._config.diskimages.get(image)\n if not diskimage and not local_builds:\n # This builder is and was not responsible for this image,\n # so ignore it.\n return\n # Remove any local builds that are not in use.\n if not diskimage or (diskimage and not diskimage.in_use):\n builds_to_keep -= local_builds\n # TODO(jeblair): When all builds for an image which is not\n # in use are deleted, the image znode should be deleted as\n # well.\n\n for build in all_builds:\n # Start by deleting any uploads that are no longer needed\n # because this image has been removed from a provider\n # (since this should be done regardless of the build\n # state).\n for provider in known_providers:\n try:\n self._cleanupObsoleteProviderUploads(provider, image,\n build.id)\n if build in builds_to_keep:\n self._cleanupCurrentProviderUploads(provider.name,\n image,\n build.id)\n except Exception:\n self.log.exception(\"Exception cleaning up uploads \"\n \"of build %s of image %s in \"\n \"provider %s:\",\n build, image, provider)\n\n # If the build is in the delete state, we will try to\n # delete the entire thing regardless.\n if build.state != zk.DELETING:\n # If it is in any other state, we will only delete it\n # if it is older than the most recent two ready\n # builds, or is in the 
building state but not actually\n # building.\n if build in builds_to_keep:\n continue\n elif self._inProgressBuild(build, image):\n continue\n\n for provider in known_providers:\n try:\n self._cleanupProvider(provider, image, build.id)\n except Exception:\n self.log.exception(\"Exception cleaning up build %s \"\n \"of image %s in provider %s:\",\n build, image, provider)\n\n uploads_exist = False\n for p in self._zk.getBuildProviders(image, build.id):\n if self._zk.getImageUploadNumbers(image, build.id, p):\n uploads_exist = True\n break\n\n if not uploads_exist:\n if build.state != zk.DELETING:\n with self._zk.imageBuildNumberLock(\n image, build.id, blocking=False\n ):\n build.state = zk.DELETING\n self._zk.storeBuild(image, build, build.id)\n\n # Release the lock here so we can delete the build znode\n if self._deleteLocalBuild(image, build.id, build.builder):\n if not self._zk.deleteBuild(image, build.id):\n self.log.error(\"Unable to delete build %s because\"\n \" uploads still remain.\", build)\n\n def run(self):\n '''\n Start point for the CleanupWorker thread.\n '''\n self._running = True\n while self._running:\n # Don't do work if we've lost communication with the ZK cluster\n while self._zk and (self._zk.suspended or self._zk.lost):\n self.log.info(\"ZooKeeper suspended. Waiting\")\n time.sleep(SUSPEND_WAIT_TIME)\n\n try:\n self._run()\n except Exception:\n self.log.exception(\"Exception in CleanupWorker:\")\n time.sleep(10)\n\n time.sleep(self._interval)\n\n provider_manager.ProviderManager.stopProviders(self._config)\n\n def _run(self):\n '''\n Body of run method for exception handling purposes.\n '''\n new_config = nodepool_config.loadConfig(self._config_path)\n if not self._config:\n self._config = new_config\n\n self._checkForZooKeeperChanges(new_config)\n provider_manager.ProviderManager.reconfigure(self._config, new_config,\n use_taskmanager=False)\n self._config = new_config\n\n self._cleanup()\n\n\nclass BuildWorker(BaseWorker):\n def __init__(self, name, config_path, interval, zk, dib_cmd):\n super(BuildWorker, self).__init__(config_path, interval, zk)\n self.log = logging.getLogger(\"nodepool.builder.BuildWorker.%s\" % name)\n self.name = 'BuildWorker.%s' % name\n self.dib_cmd = dib_cmd\n\n def _running_under_virtualenv(self):\n # NOTE: borrowed from pip:locations.py\n if hasattr(sys, 'real_prefix'):\n return True\n elif sys.prefix != getattr(sys, \"base_prefix\", sys.prefix):\n return True\n return False\n\n def _activate_virtualenv(self):\n \"\"\"Run as a pre-exec function to activate current virtualenv\n\n If we are invoked directly as /path/ENV/nodepool-builer (as\n done by an init script, for example) then /path/ENV/bin will\n not be in our $PATH, meaning we can't find disk-image-create.\n Apart from that, dib also needs to run in an activated\n virtualenv so it can find utils like dib-run-parts. 
Run this\n before exec of dib to ensure the current virtualenv (if any)\n is activated.\n \"\"\"\n if self._running_under_virtualenv():\n activate_this = os.path.join(sys.prefix, \"bin\", \"activate_this.py\")\n if not os.path.exists(activate_this):\n raise exceptions.BuilderError(\"Running in a virtualenv, but \"\n \"cannot find: %s\" % activate_this)\n execfile(activate_this, dict(__file__=activate_this))\n\n def _checkForScheduledImageUpdates(self):\n '''\n Check every DIB image to see if it has aged out and needs rebuilt.\n '''\n for diskimage in self._config.diskimages.values():\n # Check if we've been told to shutdown\n # or if ZK connection is suspended\n if not self.running or self._zk.suspended or self._zk.lost:\n return\n try:\n self._checkImageForScheduledImageUpdates(diskimage)\n except Exception:\n self.log.exception(\"Exception checking for scheduled \"\n \"update of diskimage %s\",\n diskimage.name)\n\n def _checkImageForScheduledImageUpdates(self, diskimage):\n '''\n Check one DIB image to see if it needs to be rebuilt.\n\n .. note:: It's important to lock the image build before we check\n the state time and then build to eliminate any race condition.\n '''\n # Check if diskimage builds are paused.\n if diskimage.pause:\n return\n\n if not diskimage.image_types:\n # We don't know what formats to build.\n return\n\n now = int(time.time())\n builds = self._zk.getMostRecentBuilds(1, diskimage.name, zk.READY)\n\n # If there is no build for this image, or it has aged out\n # or if the current build is missing an image type from\n # the config file, start a new build.\n if (not builds\n or (now - builds[0].state_time) >= diskimage.rebuild_age\n or not set(builds[0].formats).issuperset(diskimage.image_types)\n ):\n try:\n with self._zk.imageBuildLock(diskimage.name, blocking=False):\n # To avoid locking each image repeatedly, we have an\n # second, redundant check here to verify that a new\n # build didn't appear between the first check and the\n # lock acquisition. If it's not the same build as\n # identified in the first check above, assume another\n # BuildWorker created the build for us and continue.\n builds2 = self._zk.getMostRecentBuilds(1, diskimage.name, zk.READY)\n if builds2 and builds[0].id != builds2[0].id:\n return\n\n self.log.info(\"Building image %s\" % diskimage.name)\n\n data = zk.ImageBuild()\n data.state = zk.BUILDING\n data.builder = self._hostname\n\n bnum = self._zk.storeBuild(diskimage.name, data)\n data = self._buildImage(bnum, diskimage)\n self._zk.storeBuild(diskimage.name, data, bnum)\n except exceptions.ZKLockException:\n # Lock is already held. 
Skip it.\n pass\n\n def _checkForManualBuildRequest(self):\n '''\n Query ZooKeeper for any manual image build requests.\n '''\n for diskimage in self._config.diskimages.values():\n # Check if we've been told to shutdown\n # or if ZK connection is suspended\n if not self.running or self._zk.suspended or self._zk.lost:\n return\n try:\n self._checkImageForManualBuildRequest(diskimage)\n except Exception:\n self.log.exception(\"Exception checking for manual \"\n \"update of diskimage %s\",\n diskimage)\n\n def _checkImageForManualBuildRequest(self, diskimage):\n '''\n Query ZooKeeper for a manual image build request for one image.\n '''\n # Check if diskimage builds are paused.\n if diskimage.pause:\n return\n\n # Reduce use of locks by adding an initial check here and\n # a redundant check after lock acquisition.\n if not self._zk.hasBuildRequest(diskimage.name):\n return\n\n try:\n with self._zk.imageBuildLock(diskimage.name, blocking=False):\n # Redundant check\n if not self._zk.hasBuildRequest(diskimage.name):\n return\n\n self.log.info(\n \"Manual build request for image %s\" % diskimage.name)\n\n data = zk.ImageBuild()\n data.state = zk.BUILDING\n data.builder = self._hostname\n\n bnum = self._zk.storeBuild(diskimage.name, data)\n data = self._buildImage(bnum, diskimage)\n self._zk.storeBuild(diskimage.name, data, bnum)\n\n # Remove request on a successful build\n if data.state == zk.READY:\n self._zk.removeBuildRequest(diskimage.name)\n\n except exceptions.ZKLockException:\n # Lock is already held. Skip it.\n pass\n\n def _buildImage(self, build_id, diskimage):\n '''\n Run the external command to build the diskimage.\n\n :param str build_id: The ID for the build (used in image filename).\n :param diskimage: The diskimage as retrieved from our config file.\n\n :returns: An ImageBuild object of build-related data.\n\n :raises: BuilderError if we failed to execute the build command.\n '''\n base = \"-\".join([diskimage.name, build_id])\n image_file = DibImageFile(base)\n filename = image_file.to_path(self._config.imagesdir, False)\n\n env = os.environ.copy()\n env['DIB_RELEASE'] = diskimage.release\n env['DIB_IMAGE_NAME'] = diskimage.name\n env['DIB_IMAGE_FILENAME'] = filename\n\n # Note we use a reference to the nodepool config here so\n # that whenever the config is updated we get up to date\n # values in this thread.\n if self._config.elementsdir:\n env['ELEMENTS_PATH'] = self._config.elementsdir\n\n # send additional env vars if needed\n for k, v in diskimage.env_vars.items():\n env[k] = v\n\n img_elements = diskimage.elements\n img_types = \",\".join(diskimage.image_types)\n\n qemu_img_options = ''\n if 'qcow2' in img_types:\n qemu_img_options = DEFAULT_QEMU_IMAGE_COMPAT_OPTIONS\n\n cmd = ('%s -x -t %s --checksum --no-tmpfs %s -o %s %s' %\n (self.dib_cmd, img_types, qemu_img_options, filename,\n img_elements))\n\n log = logging.getLogger(\"nodepool.image.build.%s\" %\n (diskimage.name,))\n\n self.log.info('Running %s' % cmd)\n\n try:\n p = subprocess.Popen(\n shlex.split(cmd),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n preexec_fn=self._activate_virtualenv,\n env=env)\n except OSError as e:\n raise exceptions.BuilderError(\n \"Failed to exec '%s'. Error: '%s'\" % (cmd, e.strerror)\n )\n\n while True:\n ln = p.stdout.readline()\n log.info(ln.strip())\n if not ln:\n break\n\n p.wait()\n\n # It's possible the connection to the ZK cluster could have been\n # interrupted during the build. 
If so, wait for it to return.\n        # It could transition directly from SUSPENDED to CONNECTED, or go\n        # through the LOST state before CONNECTED.\n        while self._zk.suspended or self._zk.lost:\n            self.log.info(\"ZooKeeper suspended during build. Waiting\")\n            time.sleep(SUSPEND_WAIT_TIME)\n\n        build_data = zk.ImageBuild()\n        build_data.builder = self._hostname\n\n        if self._zk.didLoseConnection:\n            self.log.info(\"ZooKeeper lost while building %s\" % diskimage.name)\n            self._zk.resetLostFlag()\n            build_data.state = zk.FAILED\n        elif p.returncode:\n            self.log.info(\"DIB failed creating %s\" % diskimage.name)\n            build_data.state = zk.FAILED\n        else:\n            self.log.info(\"DIB image %s is built\" % diskimage.name)\n            build_data.state = zk.READY\n            build_data.formats = img_types.split(\",\")\n\n        if self._statsd:\n            # record stats on the size of each image we create\n            for ext in img_types.split(','):\n                key = 'nodepool.dib_image_build.%s.%s.size' % (diskimage.name, ext)\n                # A bit tricky because these image files may be sparse\n                # files; we only want the true size of the file for\n                # purposes of watching if we've added too much stuff\n                # into the image. Note that st_blocks is defined as\n                # 512-byte blocks by stat(2)\n                size = os.stat(\"%s.%s\" % (filename, ext)).st_blocks * 512\n                self.log.debug(\"%s created image %s.%s (size: %d) \" %\n                               (diskimage.name, filename, ext, size))\n                self._statsd.gauge(key, size)\n\n        return build_data\n\n    def run(self):\n        '''\n        Start point for the BuildWorker thread.\n        '''\n        self._running = True\n        while self._running:\n            # Don't do work if we've lost communication with the ZK cluster\n            while self._zk and (self._zk.suspended or self._zk.lost):\n                self.log.info(\"ZooKeeper suspended. Waiting\")\n                time.sleep(SUSPEND_WAIT_TIME)\n\n            try:\n                self._run()\n            except Exception:\n                self.log.exception(\"Exception in BuildWorker:\")\n                time.sleep(10)\n\n            time.sleep(self._interval)\n\n    def _run(self):\n        '''\n        Body of run method for exception handling purposes.\n        '''\n        # NOTE: For the first iteration, we expect self._config to be None\n        new_config = nodepool_config.loadConfig(self._config_path)\n        if not self._config:\n            self._config = new_config\n\n        self._checkForZooKeeperChanges(new_config)\n        self._config = new_config\n\n        self._checkForScheduledImageUpdates()\n        self._checkForManualBuildRequest()\n\n\nclass UploadWorker(BaseWorker):\n    def __init__(self, name, config_path, interval, zk):\n        super(UploadWorker, self).__init__(config_path, interval, zk)\n        self.log = logging.getLogger(\"nodepool.builder.UploadWorker.%s\" % name)\n        self.name = 'UploadWorker.%s' % name\n\n    def _reloadConfig(self):\n        '''\n        Reload the nodepool configuration file.\n        '''\n        new_config = nodepool_config.loadConfig(self._config_path)\n        if not self._config:\n            self._config = new_config\n\n        self._checkForZooKeeperChanges(new_config)\n        provider_manager.ProviderManager.reconfigure(self._config, new_config,\n                                                     use_taskmanager=False)\n        self._config = new_config\n\n    def _uploadImage(self, build_id, upload_id, image_name, images, provider):\n        '''\n        Upload a local DIB image build to a provider.\n\n        :param str build_id: Unique ID of the image build to upload.\n        :param str upload_id: Unique ID of the upload.\n        :param str image_name: Name of the diskimage.\n        :param list images: A list of DibImageFile objects from this build\n            that are available for uploading.\n        :param provider: The provider from the parsed config file.\n        '''\n        start_time = time.time()\n        timestamp = int(start_time)\n\n        image = None\n        for i in images:\n            if provider.image_type == i.extension:\n                image = i\n                break\n\n        if not image:\n            raise exceptions.BuilderInvalidCommandError(\n                \"Unable to find image file of type %s for id %s to upload\" %\n                (provider.image_type, build_id)\n            )\n\n        self.log.debug(\"Found image file of type %s for image id: %s\" %\n                       (image.extension, image.image_id))\n\n        filename = image.to_path(self._config.imagesdir, with_extension=True)\n\n        dummy_image = type('obj', (object,),\n                           {'name': image_name, 'id': image.image_id})\n\n        ext_image_name = provider.template_hostname.format(\n            provider=provider, image=dummy_image,\n            timestamp=str(timestamp)\n        )\n\n        self.log.info(\"Uploading DIB image build %s from %s to %s\" %\n                      (build_id, filename, provider.name))\n\n        manager = self._config.provider_managers[provider.name]\n        provider_image = provider.images.get(image_name)\n        if provider_image is None:\n            raise exceptions.BuilderInvalidCommandError(\n                \"Could not find matching provider image for %s\" % image_name\n            )\n\n        meta = provider_image.meta.copy()\n        meta['nodepool_build_id'] = build_id\n        meta['nodepool_upload_id'] = upload_id\n\n        try:\n            external_id = manager.uploadImage(\n                ext_image_name, filename,\n                image_type=image.extension,\n                meta=meta,\n                md5=image.md5,\n                sha256=image.sha256,\n            )\n        except Exception:\n            self.log.exception(\"Failed to upload image %s to provider %s\" %\n                               (image_name, provider.name))\n            data = zk.ImageUpload()\n            data.state = zk.FAILED\n            return data\n\n        if self._statsd:\n            dt = int((time.time() - start_time) * 1000)\n            key = 'nodepool.image_update.%s.%s' % (image_name,\n                                                   provider.name)\n            self._statsd.timing(key, dt)\n            self._statsd.incr(key)\n\n        base = \"-\".join([image_name, build_id])\n        self.log.info(\"Image build %s in %s is ready\" %\n                      (base, provider.name))\n\n        data = zk.ImageUpload()\n        data.state = zk.READY\n        data.external_id = external_id\n        data.external_name = ext_image_name\n        return data\n\n    def _checkForProviderUploads(self):\n        '''\n        Check for any image builds that need to be uploaded to providers.\n\n        If we find any builds in the 'ready' state that haven't been uploaded\n        to providers, do the upload if they are available on the local disk.\n        '''\n        for provider in self._config.providers.values():\n            for image in provider.images.values():\n                uploaded = False\n\n                # Check if we've been told to shutdown\n                # or if ZK connection is suspended\n                if not self.running or self._zk.suspended or self._zk.lost:\n                    return\n                try:\n                    uploaded = self._checkProviderImageUpload(provider, image)\n                except Exception:\n                    self.log.exception(\"Error uploading image %s \"\n                                       \"to provider %s:\",\n                                       image.name, provider.name)\n\n                # NOTE: Due to the configuration file disagreement issue\n                # (the copy we have may not be current), if we took the time\n                # to attempt to upload an image, let's short-circuit this loop\n                # to give us a chance to reload the configuration file.\n                if uploaded:\n                    return\n\n    def _checkProviderImageUpload(self, provider, image):\n        '''\n        The main body of _checkForProviderUploads. This encapsulates\n        checking whether an image for a provider should be uploaded\n        and performing the upload. It is a separate function so that\n        exception handling can treat all provider-image uploads\n        independently.\n\n        :returns: True if an upload was attempted, False otherwise.\n        '''\n        # Check if image uploads are paused.\n        if provider.images.get(image.name).pause:\n            return False\n\n        # Search for the most recent 'ready' image build\n        builds = self._zk.getMostRecentBuilds(1, image.name,\n                                              zk.READY)\n        if not builds:\n            return False\n\n        build = builds[0]\n\n        # Search for locally built images. 
The image name and build\n # sequence ID is used to name the image.\n local_images = DibImageFile.from_image_id(\n self._config.imagesdir, \"-\".join([image.name, build.id]))\n if not local_images:\n return False\n\n # See if this image has already been uploaded\n upload = self._zk.getMostRecentBuildImageUploads(\n 1, image.name, build.id, provider.name, zk.READY)\n if upload:\n return False\n\n # See if this provider supports the available image formats\n if provider.image_type not in build.formats:\n return False\n\n try:\n with self._zk.imageUploadLock(\n image.name, build.id, provider.name,\n blocking=False\n ):\n # Verify once more that it hasn't been uploaded since the\n # last check.\n upload = self._zk.getMostRecentBuildImageUploads(\n 1, image.name, build.id, provider.name, zk.READY)\n if upload:\n return False\n\n # NOTE: Due to the configuration file disagreement issue\n # (the copy we have may not be current), we try to verify\n # that another thread isn't trying to delete this build just\n # before we upload.\n b = self._zk.getBuild(image.name, build.id)\n if b.state == zk.DELETING:\n return False\n\n # New upload number with initial state 'uploading'\n data = zk.ImageUpload()\n data.state = zk.UPLOADING\n upnum = self._zk.storeImageUpload(\n image.name, build.id, provider.name, data)\n\n data = self._uploadImage(build.id, upnum, image.name,\n local_images, provider)\n\n # Set final state\n self._zk.storeImageUpload(image.name, build.id,\n provider.name, data, upnum)\n return True\n except exceptions.ZKLockException:\n # Lock is already held. Skip it.\n return False\n\n def run(self):\n\n '''\n Start point for the UploadWorker thread.\n '''\n self._running = True\n while self._running:\n # Don't do work if we've lost communication with the ZK cluster\n while self._zk and (self._zk.suspended or self._zk.lost):\n self.log.info(\"ZooKeeper suspended. Waiting\")\n time.sleep(SUSPEND_WAIT_TIME)\n\n try:\n self._reloadConfig()\n self._checkForProviderUploads()\n except Exception:\n self.log.exception(\"Exception in UploadWorker:\")\n time.sleep(10)\n\n time.sleep(self._interval)\n\n provider_manager.ProviderManager.stopProviders(self._config)\n\n\nclass NodePoolBuilder(object):\n '''\n Main class for the Nodepool Builder.\n\n The builder has the responsibility to:\n\n * Start and maintain the working state of each worker thread.\n '''\n log = logging.getLogger(\"nodepool.builder.NodePoolBuilder\")\n\n def __init__(self, config_path, num_builders=1, num_uploaders=4):\n '''\n Initialize the NodePoolBuilder object.\n\n :param str config_path: Path to configuration file.\n :param int num_builders: Number of build workers to start.\n :param int num_uploaders: Number of upload workers to start.\n '''\n self._config_path = config_path\n self._config = None\n self._num_builders = num_builders\n self._build_workers = []\n self._num_uploaders = num_uploaders\n self._upload_workers = []\n self._janitor = None\n self._running = False\n self.cleanup_interval = 60\n self.build_interval = 10\n self.upload_interval = 10\n self.dib_cmd = 'disk-image-create'\n self.zk = None\n\n # This lock is needed because the run() method is started in a\n # separate thread of control, which can return before the scheduler\n # has completed startup. 
We need to avoid shutting down before the\n # startup process has completed.\n self._start_lock = threading.Lock()\n\n #=======================================================================\n # Private methods\n #=======================================================================\n\n def _getAndValidateConfig(self):\n config = nodepool_config.loadConfig(self._config_path)\n if not config.zookeeper_servers.values():\n raise RuntimeError('No ZooKeeper servers specified in config.')\n if not config.imagesdir:\n raise RuntimeError('No images-dir specified in config.')\n return config\n\n #=======================================================================\n # Public methods\n #=======================================================================\n\n def start(self):\n '''\n Start the builder.\n\n The builder functionality is encapsulated within threads run\n by the NodePoolBuilder. This starts the needed sub-threads\n which will run forever until we tell them to stop.\n '''\n with self._start_lock:\n if self._running:\n raise exceptions.BuilderError('Cannot start, already running.')\n\n self._config = self._getAndValidateConfig()\n self._running = True\n\n # All worker threads share a single ZooKeeper instance/connection.\n self.zk = zk.ZooKeeper()\n self.zk.connect(self._config.zookeeper_servers.values())\n\n self.log.debug('Starting listener for build jobs')\n\n # Create build and upload worker objects\n for i in range(self._num_builders):\n w = BuildWorker(i, self._config_path, self.build_interval,\n self.zk, self.dib_cmd)\n w.start()\n self._build_workers.append(w)\n\n for i in range(self._num_uploaders):\n w = UploadWorker(i, self._config_path, self.upload_interval,\n self.zk)\n w.start()\n self._upload_workers.append(w)\n\n self._janitor = CleanupWorker(0, self._config_path,\n self.cleanup_interval, self.zk)\n self._janitor.start()\n\n # Wait until all threads are running. Otherwise, we have a race\n # on the worker _running attribute if shutdown() is called before\n # run() actually begins.\n while not all([\n x.running for x in (self._build_workers\n + self._upload_workers\n + [self._janitor])\n ]):\n time.sleep(0)\n\n def stop(self):\n '''\n Stop the builder.\n\n Signal the sub threads to begin the shutdown process. We don't\n want this method to return until the scheduler has successfully\n stopped all of its own threads.\n '''\n with self._start_lock:\n self.log.debug(\"Stopping. 
NodePoolBuilder shutting down workers\")\n            for worker in (self._build_workers\n                           + self._upload_workers\n                           + [self._janitor]\n                           ):\n                worker.shutdown()\n\n            self._running = False\n\n        self.log.debug('Waiting for jobs to complete')\n\n        # Do not exit until all of our owned threads exit.\n        for worker in (self._build_workers\n                       + self._upload_workers\n                       + [self._janitor]\n                       ):\n            worker.join()\n\n        self.log.debug('Terminating ZooKeeper connection')\n        self.zk.disconnect()\n\n        self.log.debug('Stopping providers')\n        provider_manager.ProviderManager.stopProviders(self._config)\n        self.log.debug('Finished stopping')\n","sub_path":"nodepool/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":44843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"372359413","text":"#====================\nimport tkinter as tk\nfrom tkinter import ttk\n#====================\n\n# Create Instance\nwin = tk.Tk()\n\n# Add a Title\nwin.title(\"Python GUI\")\n\n# Adding a Label that will get modified\na_label = ttk.Label(win, text=\"A_label\")\na_label.grid(column=0, row=0)\n\n# Button Click Event Function\ndef click_me():\n    action.configure(text=\"** i have been Clicked! **\")\n    a_label.configure(foreground=\"red\")\n    a_label.configure(text=\"A Red label\")\n\n# Adding a Textbox Entry widget\nname = tk.StringVar()\nname_entered = ttk.Entry(win, width=12, textvariable=name)\nname_entered.grid(column=0, row=1)\n\n# Adding a button\naction = ttk.Button(win, text=\"Click me!\", command=click_me)\naction.grid(column=1, row=1)\n# Disable cursor into name Entry\n#action.configure(state='disabled')\n\nname_entered.focus()\n#====================\n# Start GUI\n#====================\nwin.mainloop()\n\n\n","sub_path":"CH01_code/GUI_set_focus.py","file_name":"GUI_set_focus.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"100608534","text":"__author__ = \"Luke Liu\"\n#encoding=\"utf-8\"\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import load_iris\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\nimport os\nimport numpy as np\nfrom PIL import Image\ndef unpickle(file):\n    import pickle\n    with open(file, 'rb') as fo:\n        dict = pickle.load(fo, encoding='bytes')\n    return dict\npaths_labels=unpickle(\"AM_CGAN\")\nimages_path = paths_labels['images_path']\nredefined_labels=paths_labels['scores_path']\n\nindex=[0,1,2,3,4,5,6,7]\nscore_rank=[1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5]\nindex_scores_dict=dict(list(zip(score_rank,index)))\nredefined_labels_index=[]\nfor i in redefined_labels:\n    redefined_labels_index.append(index_scores_dict[i])\n# process the images\nimages_array=np.zeros((2000,64,64,3),dtype=np.float32)\ndataset_path='D:\\\BaiduYunDownload\\\python_exe\\\dataset\\\scut_faces\\\Images'\nimages_paths=[os.path.join(dataset_path,i) for i in images_path]\nfor i in range(2000):\n    img=Image.open(images_paths[i])\n    img=img.resize((64,64))\n    img=np.asarray(img)\n    im=img/255.\n    images_array[i,:,:,:]=im # store each image at its own index\n    print(\"finished {}/2000\".format(i+1))\nprint(len(list(set(redefined_labels_index))))\nimages_array=np.reshape(images_array,(2000,64*64*3))\n# normalize the data directly\nscale = MinMaxScaler().fit(images_array)  # fit the scaling rule\nface_dataScale = scale.transform(images_array)  # apply the scaling rule\n# build a KMeans model with 8 clusters\nkmeans = KMeans(n_clusters=8,random_state=12).fit(face_dataScale)  # build and train the model\n\n\n''' Visualize the clustering results '''\nprint(\"waiting for the tsne\")\ntsne = TSNE(n_components=2,init='random',random_state=17).fit_transform(face_dataScale)  # use TSNE to reduce the data to two dimensions\ndf = pd.DataFrame(tsne)  # fit_transform already returns the embedded array, so wrap it directly in a DataFrame\nprint(\"tsne is ok...\")\ndf['labels'] = kmeans.labels_  # store the clustering results in the df table\ndf1 = df[df['labels']==0]\ndf2 = df[df['labels']==1]\ndf3 = df[df['labels']==2]\ndf4=df[df['labels']==3]\ndf5=df[df['labels']==4]\ndf6 = df[df['labels']==5]\ndf7 = df[df['labels']==6]\ndf8=df[df['labels']==7]\n# fig = plt.figure(figsize=(9,6)) # create a blank canvas of the given size\nplt.plot(df1[0],df1[1],'bo',df2[0],df2[1],'r*',df3[0],df3[1],'gD',df4[0],df4[1],'yo',df5[0],df5[1],'gray',df6[0],df6[1],'green',df7[0],df7[1],'blue',df8[0],df8[1],'r')\nplt.show()  # show the figure","sub_path":"Day08_20190909/data_preprocess/K_means_聚类.py","file_name":"K_means_聚类.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"278709878","text":"# I learned how to implement binary search tree from https://github.com/peterhil/leftrb/blob/master/leftrb\n# Parts of my code are originally from https://www.cs.princeton.edu/~rs/AlgsDS07/09BalancedTrees.pdf and course slides\n\n\nclass binary_search_tree(object):\n    root = None\n\n    class Node(object):\n        def __init__(self, key, val=None):\n            self.key = key\n            self.val = val\n            self.l_child = None\n            self.r_child = None\n\n        def insert(self, key, val=None):\n            if self.key == key:\n                self.val = val\n            elif self.key > key:\n                if self.l_child is None:\n                    self.l_child = binary_search_tree.Node(key, val)\n                else:\n                    self.l_child = self.l_child.insert(key, val)\n            else:\n                if self.r_child is None:\n                    self.r_child = binary_search_tree.Node(key, val)\n                else:\n                    self.r_child = self.r_child.insert(key, val)\n            return self\n\n        def search(self, key):\n            if self.key == key:\n                if self.val is None:\n                    return self.key\n                else:\n                    return self.val\n            elif key < self.key and self.l_child:\n                return self.l_child.search(key)\n            elif key > self.key and self.r_child:\n                return self.r_child.search(key)\n            else:\n                return None\n\n        def find_min(self):\n            if self.l_child is None:\n                return self\n            else:\n                return self.l_child.find_min()\n\n        def find_max(self):\n            if self.r_child is None:\n                return self\n            else:\n                return self.r_child.find_max()\n\n    def insert_in_tree(self, key, val=None):\n        if self.root is None:\n            self.root = self.Node(key, val)\n        else:\n            self.root.insert(key, val)\n\n    def search_in_tree(self, key):\n        if self.root is None:\n            return None\n        else:\n            return self.root.search(key)\n","sub_path":"Assignment3/Q5/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"609019964","text":"from django import template  # pragma: no cover\nfrom lxml import html\nfrom lxml.etree import tostring\nfrom django.urls import reverse\n\nregister = template.Library()  # pragma: no cover\n\n\ndef div_builder(progress, chapter_id):\n    url = reverse(\n        \"profiles:reading-progress\",\n        kwargs={\"chapter_id\": chapter_id, \"progress\": progress},\n    )\n    div = f'
'\n    return div\n\n\n@register.filter(name=\"read_inserter\")\ndef read_inserter(html_string, chapter_id):\n\n    content = \"\"\n    tree = html.fromstring(html_string)\n    for anchor in tree.xpath(\"//a\"):\n        anchor.drop_tag()\n    for script in tree.xpath(\"//script\"):\n        script.drop_tag()\n    for span in tree.xpath(\"//*[@lang]\"):\n        span.drop_tag()\n    if tree.text is not None and tree.text.strip() != \"\":\n        content += tree.text\n    children = tree.getchildren()\n    for index, element in enumerate(children, 1):\n        content += tostring(element).decode(\"unicode-escape\")\n        if index % 10 == 0:\n            content += div_builder(index, chapter_id)\n    return content\n","sub_path":"src/utils/templatetags/read_inserter.py","file_name":"read_inserter.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"342368575","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 27 22:15:16 2018\n\n@author: susmeher\n\"\"\"\n\nimport pandas as pd\n\n\nnr7_file_path = 'D:\\\\Users\\\\susmeher\\\\Downloads\\\\NR7NR4.xlsx' #input('NR7 file path: ')\nvol_file_path = 'D:\\\\code\\\\StockAnalysis.xlsx' #input('Volatility file path: ')\n\nnr7_data = pd.read_excel(nr7_file_path)\nvol_data = pd.read_excel(vol_file_path)\ni = 1\nfor sym in nr7_data['Symbol']:\n    if sym in list(vol_data['SYMBOL']):\n        v = vol_data[vol_data.SYMBOL == sym].V.item()\n        if v > 2.0:\n            print(str(i) + '. ' + sym + ':' + str(v))\n            i += 1\n","sub_path":"old/nr7_shortlist.py","file_name":"nr7_shortlist.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"635708245","text":"# -*- coding:utf-8 -*-\n#\n# Merge two sorted linked lists\n\n\nclass ListNode:\n\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution:\n    # return the merged list\n\n    def Merge(self, pHead1, pHead2):\n        if pHead1 is None:\n            return pHead2\n        if pHead2 is None:\n            return pHead1\n\n        new_head, node = None, None\n        while pHead1 is not None and pHead2 is not None:\n            if pHead1.val <= pHead2.val:\n                if new_head is None:\n                    new_head = pHead1\n                    node = new_head\n                    pHead1 = pHead1.next\n                else:\n                    node.next = pHead1\n                    node = node.next\n                    pHead1 = pHead1.next\n            else:\n                if new_head is None:\n                    new_head = pHead2\n                    node = new_head\n                    pHead2 = pHead2.next\n                else:\n                    node.next = pHead2\n                    node = node.next\n                    pHead2 = pHead2.next\n\n        if pHead1 is not None:\n            node.next = pHead1\n\n        if pHead2 is not None:\n            node.next = pHead2\n\n        return new_head\n","sub_path":"剑指offer/13.合并两个排序的链表.py","file_name":"13.合并两个排序的链表.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"565060085","text":"from future.utils import iteritems\nimport math\nimport numpy as np\nimport os\n\nfrom pydrake.common.cpp_param import List\nfrom pydrake.common.eigen_geometry import Quaternion\nfrom pydrake.common.value import Value\nfrom pydrake.math import (\n    RigidTransform,\n    RollPitchYaw,\n    RotationMatrix\n)\nfrom pydrake.multibody.math import SpatialForce\nfrom pydrake.multibody.parsing import Parser\nfrom pydrake.multibody.plant import (\n    ExternallyAppliedSpatialForce\n)\nfrom pydrake.systems.analysis import Simulator\nfrom pydrake.systems.framework import (\n    BasicVector,\n    DiagramBuilder,\n    LeafSystem\n)\nfrom pydrake.systems.primitives import (\n    ConstantVectorSource\n)\n\nfrom gym.envs.robot_locomotion_group.drake.shoe.floating_hand_controllers import (\n    SpatialHandController,\n    
SetpointController,\n set_targets\n)\nfrom gym.envs.robot_locomotion_group.drake.shoe.rope_utils import (\n post_finalize_rope_settings,\n initialize_rope_zero\n)\nfrom gym.envs.robot_locomotion_group.drake.shoe.manipulation_diagram import ManipulationDiagram\n\n\ndef build_shoe_diagram(config):\n builder = DiagramBuilder()\n\n station = builder.AddSystem(ManipulationDiagram(config))\n station.add_rope_and_ground(include_ground=False)\n if 'arms' in config['env']:\n station.add_arms_from_config(config)\n parser = Parser(station.mbp, station.sg)\n shoe_dir = os.path.dirname(os.path.abspath(__file__))\n model_file = os.path.join(shoe_dir, \"model/shoe.sdf\")\n shoe_model = parser.AddModelFromFile(model_file, \"shoe\")\n if config[\"env\"][\"visualization\"]:\n station.connect_to_drake_visualizer()\n visualizer = None\n if \"meshcat\" in config[\"env\"] and config[\"env\"][\"meshcat\"]:\n visualizer = station.connect_to_meshcat()\n if config[\"env\"][\"parameterization\"] == \"closed\":\n left_rope_point = station.add_vis_object(\"left_rope\", [1, 0, 0, 1])\n right_rope_point = station.add_vis_object(\"right_rope\", [0, 1, 0, 1])\n left_target_point = station.add_vis_object(\"left_target_point\", [1, 0, 0, 1])\n right_target_point = station.add_vis_object(\"right_target_point\", [0, 1, 0, 1])\n if config[\"env\"][\"rgbd_sensors\"][\"enabled\"]:\n station.add_rgbd_sensors_from_config(config)\n\n station.finalize()\n\n post_finalize_rope_settings(config, station.mbp, station.sg)\n\n targets = {}\n\n if 'arms' in config['env']:\n gripper_info = {}\n for arm_name, arm_config in iteritems(config['env']['arms']):\n # Add PID Control\n gripper = station.mbp.GetBodyByName(\"body\", station.model_ids[arm_name])\n gripper_info[arm_name] = gripper.index()\n\n # Initialize targets from file\n init = config[\"env\"][\"arms\"][arm_name][\"rpy\"][:]\n init.extend(config[\"env\"][\"arms\"][arm_name][\"pos\"])\n targets[arm_name] = builder.AddSystem(ConstantVectorSource(init))\n width_init = config[\"env\"][\"arms\"][arm_name][\"grip\"]\n targets[f\"{arm_name}_width\"] = builder.AddSystem(ConstantVectorSource([width_init]))\n pid = builder.AddSystem(SpatialHandController(gripper_info))\n builder.Connect(station.GetOutputPort(f\"body_poses\"), pid.GetInputPort(\"body_positions\"))\n sp_control = builder.AddSystem(SetpointController(gripper_info, {\"position\": [0.005, 0.005, 0.005, 0.0003, 0.0003, 0.0003],\n \"width\": 0.001}))\n for arm_name, arm_config in iteritems(config['env']['arms']):\n builder.Connect(targets[arm_name].get_output_port(0),\n sp_control.GetInputPort(f\"{arm_name}_target\"))\n builder.Connect(sp_control.GetOutputPort(f\"{arm_name}_setpoint\"),\n pid.GetInputPort(f\"{arm_name}_desired\"))\n builder.Connect(targets[f\"{arm_name}_width\"].get_output_port(0),\n sp_control.GetInputPort(f\"{arm_name}_width_target\"))\n builder.Connect(sp_control.GetOutputPort(f\"{arm_name}_width_setpoint\"),\n station.GetInputPort(f\"{arm_name}_position\"))\n builder.Connect(pid.GetOutputPort(\"spatial_forces_vector\"),\n station.GetInputPort(\"spatial_input\"))\n diagram = builder.Build()\n\n simulator = Simulator(diagram)\n sim_context = simulator.get_mutable_context()\n station_context = diagram.GetMutableSubsystemContext(station, sim_context)\n\n systems = {\"station\": station,\n \"targets\": targets,\n \"sp_control\": sp_control,\n \"pid\": pid}\n if config[\"env\"][\"parameterization\"] == \"closed\":\n systems[\"left_rope\"] = left_rope_point\n systems[\"right_rope\"] = right_rope_point\n 
systems[\"left_target_point\"] = left_target_point\n systems[\"right_target_point\"] = right_target_point\n if 'arms' in config['env']:\n values = {}\n for arm_name, arm_config in iteritems(config['env']['arms']):\n station.GetInputPort(f\"{arm_name}_force_limit\").FixValue(\n station_context, 40.)\n simulator.set_target_realtime_rate(config['env']['target_realtime_rate'])\n reset_simulator_from_config(config, simulator, diagram, systems)\n\n return simulator, diagram, systems, visualizer\n\ndef reset_simulator_from_config(config, simulator, diagram, systems):\n for rope_name, _ in iteritems(config['env']['ropes']):\n initialize_rope_zero(diagram, simulator, systems[\"station\"], rope_name)\n sim_context = simulator.get_mutable_context()\n station_context = diagram.GetMutableSubsystemContext(systems[\"station\"], sim_context)\n if 'arms' in config['env']:\n values = {}\n for arm_name, arm_config in iteritems(config['env']['arms']):\n rpy = RollPitchYaw(config[\"env\"][\"arms\"][arm_name][\"rpy\"])\n xyz = config[\"env\"][\"arms\"][arm_name][\"pos\"]\n init_state = np.append(rpy.vector(), xyz)\n grip = config[\"env\"][\"arms\"][arm_name][\"grip\"]\n quat = rpy.ToQuaternion()\n systems[\"station\"].set_model_state(station_context, arm_name,\n np.array([quat.w(), quat.x(), quat.y(), quat.z(), xyz[0], xyz[1], xyz[2], -grip/2, grip/2]), np.zeros(8))\n values[arm_name] = init_state\n values[f\"{arm_name}_width\"] = grip\n set_targets(simulator, diagram, systems, values)\n sp_context = diagram.GetMutableSubsystemContext(systems[\"sp_control\"], sim_context)\n if 'arms' in config['env']:\n for arm_name, arm_config in iteritems(config['env']['arms']):\n systems[\"sp_control\"].SetPositions(sp_context, arm_name,\n np.append(config[\"env\"][\"arms\"][arm_name][\"rpy\"], config[\"env\"][\"arms\"][arm_name][\"pos\"]),\n [config[\"env\"][\"arms\"][arm_name][\"grip\"]])\n\n pid_context = diagram.GetMutableSubsystemContext(systems[\"pid\"], sim_context)\n systems[\"pid\"].reset(pid_context)\n simulator.set_target_realtime_rate(config['env']['target_realtime_rate'])\n sim_context.SetTime(0.)\n simulator.Initialize()","sub_path":"gym/envs/robot_locomotion_group/drake/shoe/build_shoe_diagram.py","file_name":"build_shoe_diagram.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"430097471","text":"\"\"\" Render our templates \"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nimport traceback\nfrom pkg_resources import resource_listdir, resource_filename\nimport yaml\nfrom jinja2 import Environment, FileSystemLoader, meta\nimport jinja2.nodes\nimport jinja2.exceptions\nfrom cryptorito import portable_b64encode, portable_b64decode\nfrom aomi.helpers import merge_dicts, abspath, cli_hash\nimport aomi.exceptions as aomi_excep\n# Python 2/3 compat\nfrom future.utils import iteritems # pylint: disable=E0401\n\n\ndef grok_default_vars(parsed_content):\n \"\"\"Returns a list of vars for which there is a default being set\"\"\"\n default_vars = []\n for element in parsed_content.body:\n if isinstance(element, jinja2.nodes.Output):\n for node in element.nodes:\n if isinstance(node, jinja2.nodes.Filter):\n if node.name == 'default' \\\n and node.node.name not in default_vars:\n default_vars.append(node.node.name)\n elif isinstance(element, jinja2.nodes.For):\n if isinstance(element.iter, jinja2.nodes.Filter):\n if element.iter.name == 'default' \\\n and element.iter.node.name not in default_vars:\n 
default_vars.append(element.iter.node.name)\n\n return default_vars\n\n\ndef render(filename, obj):\n \"\"\"Render a template, maybe mixing in extra variables\"\"\"\n template_path = abspath(filename)\n fs_loader = FileSystemLoader(os.path.dirname(template_path))\n env = Environment(loader=fs_loader,\n autoescape=True,\n trim_blocks=True,\n lstrip_blocks=True)\n env.filters['b64encode'] = portable_b64encode\n env.filters['b64decode'] = portable_b64decode\n parsed_content = env.parse(env\n .loader\n .get_source(env,\n os.path.basename(template_path)))\n\n template_vars = meta.find_undeclared_variables(parsed_content)\n if template_vars:\n missing_vars = []\n default_vars = grok_default_vars(parsed_content)\n for var in template_vars:\n if var not in default_vars and var not in obj:\n missing_vars.append(var)\n\n if missing_vars:\n e_msg = \"Missing required variables %s\" % ','.join(missing_vars)\n raise aomi_excep.AomiData(e_msg)\n\n try:\n return env \\\n .get_template(os.path.basename(template_path)) \\\n .render(**obj)\n except jinja2.exceptions.TemplateSyntaxError as exception:\n template_trace = traceback.format_tb(sys.exc_info()[2])\n raise aomi_excep.Validation(\"Bad template %s %s\" %\n (template_trace[len(template_trace) - 1],\n str(exception)))\n except jinja2.exceptions.UndefinedError as exception:\n template_traces = [x.strip()\n for x in traceback.format_tb(sys.exc_info()[2])\n if 'template code' in x]\n raise aomi_excep.Validation(\"Missing template variable %s\" %\n ' '.join(template_traces))\n\n\ndef load_var_files(opt):\n \"\"\"Load variable files, merge, return contents\"\"\"\n obj = {}\n for var_file in opt.extra_vars_file:\n yamlz = yaml.safe_load(open(abspath(var_file)).read())\n obj = merge_dicts(obj.copy(), yamlz)\n\n return obj\n\n\ndef load_template_help(builtin):\n \"\"\"Loads the help for a given template\"\"\"\n\n help_file = \"templates/%s-help.yml\" % builtin\n help_file = resource_filename(__name__, help_file)\n help_obj = {}\n if os.path.exists(help_file):\n help_data = yaml.safe_load(open(help_file))\n if 'name' in help_data:\n help_obj['name'] = help_data['name']\n\n if 'help' in help_data:\n help_obj['help'] = help_data['help']\n\n if 'args' in help_data:\n help_obj['args'] = help_data['args']\n\n return help_obj\n\n\ndef builtin_list():\n \"\"\"Show a listing of all our builtin templates\"\"\"\n for template in resource_listdir(__name__, \"templates\"):\n builtin, ext = os.path.splitext(os.path.basename(abspath(template)))\n if ext == '.yml':\n continue\n\n help_obj = load_template_help(builtin)\n if 'name' in help_obj:\n print(\"%-*s %s\" % (20, builtin, help_obj['name']))\n else:\n print(\"%s\" % builtin)\n\n\ndef builtin_info(builtin):\n \"\"\"Show information on a particular builtin template\"\"\"\n help_obj = load_template_help(builtin)\n if help_obj.get('name') and help_obj.get('help'):\n print(\"The %s template\" % (help_obj['name']))\n print(help_obj['help'])\n else:\n print(\"No help for %s\" % builtin)\n\n if help_obj.get('args'):\n for arg, arg_help in iteritems(help_obj['args']):\n print(\" %-*s %s\" % (20, arg, arg_help))\n\n\ndef get_secretfile(opt):\n \"\"\"Returns the de-YAML'd rendered Secretfile\"\"\"\n return yaml.safe_load(render_secretfile(opt))\n\n\ndef render_secretfile(opt):\n \"\"\"Renders and returns the Secretfile construct\"\"\"\n secretfile_path = abspath(opt.secretfile)\n obj = merge_dicts(load_var_files(opt),\n cli_hash(opt.extra_vars))\n return render(secretfile_path, 
obj)\n","sub_path":"aomi/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":5364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"480241526","text":"from __future__ import annotations\nfrom typing import MutableSequence, Sequence, Mapping, Union, Optional, Any\n\nfrom pyrates.util.constants import Types, Constants\nfrom pyrates.logger.logger import mLogger\n\n\nclass Rate:\n \"\"\"\n A class to hold methods and attributes for any given rate\n\n Properties:\n\n name -> string\n code -> string\n fromEuro -> float\n toEuro -> float\n\n Methods:\n\n Convert(toRate, amount) -> float\n GetTableString() -> string\n \n Static Methods:\n\n GetRate(rateCode, rates) -> Rate\n GetDictableRate(rateCode, rates) -> Mapping\n GenerateRates(data) -> Sequence\n \"\"\"\n def __init__(self, name: str, code: str, fromEuro: float, toEuro: float) -> None:\n self.__name: str = name\n self.__code: str = code\n self.__fromEuro: float = fromEuro\n self.__toEuro: float = toEuro\n\n @property\n def name(self) -> str:\n return self.__name\n\n @property\n def code(self) -> str:\n return self.__code\n\n @property\n def fromEuro(self) -> float:\n return self.__fromEuro\n\n @property\n def toEuro(self) -> float:\n return self.__toEuro\n \n def Convert(self, toRate: Rate, amount: float = 1) -> float:\n \"\"\"\n Converts a given amount of self into toRate \n\n Parameters:\n toRate (Rate): Rate object target for currency conversion\n\n Returns:\n conversion (float) : The amount of self converted into toRate\n \"\"\"\n if isinstance(toRate, Rate):\n return (self.toEuro / toRate.toEuro) * amount\n mLogger.critical(f\"ConvertException: toRate is not type Rate but type '{type(toRate)}'\")\n raise TypeError(\"toRate argument must be of type 'Rate' and not type '%s'\" % type(toRate))\n\n def GetTableString(self) -> str:\n \"\"\"\n Generates a string to be used by PyRates __repr__ method\n\n Returns:\n tableString (str) : A string confining Rate data into a table-like structure\n \"\"\"\n return f\"\"\"| {self.__Fill(self.code, Constants.nameStringLength)}| {self.__Fill(str(self.fromEuro), Constants.rateStringLength)}| {self.__Fill(str(self.toEuro), Constants.rateStringLength)}\n|==========================================================================|\n\"\"\"\n\n def __Fill(self, inputString, limit) -> str:\n \"\"\"\n Used by GetTableString to generate whitespaces, such that the spacing between Rates in the table-like structure are consistent\n\n Parameters:\n inputString (str): String to be filled with whitespaces\n limit (int): Target length of the string\n\n Returns:\n result (str): inputString filled with the appropiate amount of whitespaces\n \"\"\"\n for i in range((limit - len(inputString))):\n inputString += \" \"\n return inputString\n\n @staticmethod\n def GetRate(rateCode: str, rates: Sequence[Rate]) -> Optional[Rate]:\n \"\"\"\n Return a Rate object.\n \n Return None if the rate could not be found.\n\n Parameters:\n rateCode (str) : Three letter currencycode string\n rates (Sequence) : Sequence of Rate objects\n\n Returns:\n result (Rate, None) : Rate object or None\n \"\"\"\n rate: Rate\n for rate in rates:\n if rate.code.upper() == rateCode.upper():\n return rate\n mLogger.debug(f\"GetRate: could not retrieve '{rateCode}' from rate sequence\")\n return None\n\n @staticmethod\n def GetDictableRate(rateCode: str, rates: Sequence[Types.DictableRate]) -> Optional[Types.DictableRate]:\n \"\"\"\n Return a dictionary with the given rate 
information\n \n Return None if the rate could not be found\n\n Parameters:\n rateCode (str) : Three letter currencycode string\n rates (Sequence) : Sequence of rate dictionaries\n\n Returns:\n result (Sequence, None) : Dictionary with given rate information or None\n \"\"\"\n rate: Types.DictableRate\n for rate in rates:\n name: Any = rate[Constants.currencyCode]\n if isinstance(name, str) and name.upper() == rateCode.upper():\n return rate\n mLogger.debug(f\"GetDictableRate: could not retrieve '{rateCode}' from rate mapping\")\n return None\n\n @staticmethod\n def GenerateRates(data: Sequence[Types.DictableRate]) -> Sequence[Rate]:\n \"\"\"\n Generates Rate objects from a Sequence of rate dictionaries\n\n Parameters:\n data (Sequence) : Sequence of rate dictionaries\n\n Returns:\n rates (Sequence) : Sequence of Rate objects\n \"\"\"\n if not isinstance(data, Sequence):\n mLogger.critical(f\"GenerateRatesException: data is not type sequence but type '{type(data)}'\")\n raise TypeError(\"Argument to function GenerateRates must be a of a sequence type not of type '%s'\" % type(data))\n generatedRates: MutableSequence[Rate] = []\n item: Mapping[str, Union[str, float]]\n for item in data:\n rate: Rate = Rate(str(item[\"name\"]), str(item[\"currency_code\"]), float(item[\"from_euro\"]), float(item[\"to_euro\"]))\n generatedRates.append(rate)\n generatedRates.append(Rate(\"Euro\", \"EUR\", 1.0, 1.0))\n return generatedRates\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, Rate):\n return self.name == other.name and self.code == other.code\n mLogger.warning(f\"RateEqualityException: can only compare a Rate type with another Rate, not type '{type(other)}'\")\n return False\n","sub_path":"pyrates/rate/rate.py","file_name":"rate.py","file_ext":"py","file_size_in_byte":6097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"137960521","text":"from keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import LSTM\r\nfrom keras.layers import Dropout\r\nfrom keras.layers import Conv1D, MaxPooling1D\r\nfrom keras.initializers import RandomUniform\r\nfrom keras.layers import Dense, Dropout, Flatten, ZeroPadding1D, GlobalAveragePooling1D\r\nfrom keras.layers.embeddings import Embedding\r\n\r\n\r\n\r\n\r\n\r\ndef ltsm_model(embeddings,\r\n\t\t\t\tvocabulary_size = 50000,\r\n\t\t\t\tembed_size = 100,\r\n\t\t\t\tmessage_length = 250,\r\n\t\t\t\tnbtargets = 22,\r\n\t\t\t\tchosenLoss = 'categorical_crossentropy',\r\n\t\t\t\tchosenOptimizer = 'adam') :\r\n \"\"\"\r\n Architecture used for v2 : classify PE mails among the 21 Cogito categories\r\n\r\n embeddings : np.array,\r\n Pretrained embedding matrix.\r\n\r\n seq_max : int, optional\r\n Maximum input length.\r\n Default value, 250.\r\n\r\n embed_size : size of the vector used for a word. 
This parameter is set during the embedding process.\r\n Default value : 100\r\n\r\n loss : str, optional\r\n Loss function for training.\r\n Default value, 'categorical_crossentropy'.\r\n\r\n ntargets : int, optional\r\n Dimension of model output.\r\n Default value, 22.\r\n\r\n Returns\r\n -------\r\n Model instance\r\n\r\n \"\"\"\r\n model = Sequential()\r\n model.add(Embedding(vocabulary_size, embed_size, weights=[embeddings], input_length=message_length, trainable=False))\r\n model.add(LSTM(150, return_sequences=True))\r\n model.add(Dropout(0.5))\r\n model.add(LSTM(100))\r\n model.add(Dropout(0.5))\r\n model.add(Dense(75, activation='relu'))\r\n model.add(Dropout(0.5))\r\n model.add(Dense(nbtargets, activation='softmax'))\r\n model.compile(loss = chosenLoss, optimizer = chosenOptimizer, metrics=['accuracy'])\r\n return model\r\n\r\n\r\n\r\ndef cnn_model_test(embeddings,\r\n\t\t\t\tvocabulary_size = 50000,\r\n\t\t\t\tembed_size = 100,\r\n\t\t\t\tmessage_length = 250,\r\n\t\t\t\tnbtargets = 22,\r\n\t\t\t\tchosenLoss = 'categorical_crossentropy',\r\n\t\t\t\tchosenOptimizer = 'adam') :\r\n\r\n model = Sequential()\r\n model.add(Embedding(vocabulary_size, embed_size, weights=[embeddings], input_length=message_length, trainable=False))\r\n\r\n model.add(ZeroPadding1D(1))\r\n model.add(Conv1D(128, 3, activation='relu'))\r\n model.add(Conv1D(128, 3, activation='relu'))\r\n\r\n model.add(ZeroPadding1D(1))\r\n model.add(Conv1D(64, 3, activation='relu'))\r\n model.add(Conv1D(64, 3, activation='relu'))\r\n\r\n model.add(ZeroPadding1D(1))\r\n model.add(Conv1D(32, 3, activation='relu'))\r\n model.add(Conv1D(32, 3, activation='relu'))\r\n\r\n model.add(GlobalAveragePooling1D('channels_last'))\r\n\r\n model.add(Dense(512, activation='relu'))\r\n model.add(Dropout(0.5))\r\n\r\n model.add(Dense(nbtargets, activation='softmax'))\r\n model.compile(optimizer=chosenOptimizer, loss=chosenLoss, metrics=['accuracy'])\r\n model.summary()\r\n return model","sub_path":"categorisationmailsads/src/neural_architecture.py","file_name":"neural_architecture.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"616771476","text":"from alpha_vantage.timeseries import TimeSeries\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\n\nts = TimeSeries(key='ZZCEDAXI8ZIXHJX4',output_format='pandas')\nnames = pd.read_csv(\"D:\\\\Documents\\\\FVID_Glassdoor\\\\2018_SnP500_Names.csv\",encoding='utf-8')\n# print(names)\ntickk = []\n\n# driver = webdriver.Chrome(executable_path=\"./chromedriver\")\n# names =['Cboe Global Markets, Inc.', 'Cintas Corporation', 'Connecticut General Corporation', 'National Association','Union Electric Company', 'Ohio Edison Company', 'Pacific Enterprises Inc.', 'Red Hat, Inc.', 'L3 Technologies, Inc.','Kansas Power & Light Co', 'Brown-Forman Corporation', 'NRG Energy, Inc.', 'The Charles Schwab Corporation']\n\n# symbol =['NYSE:CAT', 'NASDAQ:SIVB', 'BATS:CBOE', 'NASDAQ:AAPL', 'NYSE:DGX', 'NYSE:APA', 'NASDAQ:FAST', 'NYSE:AAP', 'NASDAQ:MXIM', 'NASDAQ:SBAC', 'NYSE:CHD', 'NASDAQ:CTAS', 'NASDAQ:BIIB', '-', 'NYSE:D', 'NASDAQ:WDC', 'NYSE:PXD', 'NYSE:ES', 'NYSE:DLR', 'NASDAQ:ZION', 'NYSE:NBL', 'NYSE:CRM', 'NASDAQ:FLIR', 'NASDAQ:CPRT', 'OTCMKTS:UELMO', 'NYSE:FIS', 'NYSE:BHGE', 'NYSE:TAP', 
'NYSE:IR', 'NYSE:PGR', 'NYSE:COP', '-', 'NYSE:ESS', 'NYSE:EL', '-', 'NYSE:PKI', 'NYSE:HIG', 'NASDAQ:KLAC', 'BMV:RHT', 'NASDAQ:CELG', 'NYSE:DAL', 'NASDAQ:DISCA', 'NASDAQ:TSCO', 'NYSE:PNW', 'NYSE:WFC', 'NYSE:C', '-', 'NYSE:PWR', '-', 'NYSE:BLL', 'NYSE:AIV', 'NASDAQ:VRSK', 'NASDAQ:ALGN', 'NYSE:HCP', 'NASDAQ:CTSH', '-', 'NYSE:BF.B', 'NYSE:AMT', 'NASDAQ:REGN', 'NASDAQ:ETFC', 'NYSE:GLW', 'NYSE:MGM', 'NYSE:F', 'NASDAQ:VIAB', 'NYSE:SJM', 'NASDAQ:PBCT', 'NYSE:BA', 'NASDAQ:IPGP', 'NYSE:USB', 'NYSE:NRG', 'NYSE:SCHW']\nexception=[]\n\nfor i in range(len(names)):\n compName = names[i]\n ticker = symbol[i]\n try :\n ticker = ticker.replace(' ','')\n\n time.sleep(1)\n print('Crawling ' + ticker + ' stock prices \\n')\n\n if '.' in ticker:\n tic = ticker.split(\":\")[1]\n df, meta_data = ts.get_daily(symbol=tic, outputsize='full')\n\n else:\n df, meta_data = ts.get_daily(symbol=ticker, outputsize='full')\n\n df.columns = ['open','high','low','close','volume']\n df.reset_index(level=0, inplace=True)\n compName = compName.replace(',', '')\n compName = compName.replace('.','')\n df.to_csv(\"D:\\\\Documents\\\\FVID_Glassdoor\\\\Stocks\\\\\" + compName + \".csv\",index=False, encoding='utf_8_sig')\n print(ticker + ' stock price retrieval completed \\n\\n')\n print('Number of crawled Stocks : ', i+1-len(exception))\n except Exception:\n print('Exception Exist for ' + compName + '\\n\\n')\n exception.append(compName)\n tickk.append(ticker)\n pass\n time.sleep(2)\n\n\n# for i in range(len(names)):\n# compName = names.iloc[i,0]\n# ticker = names.iloc[i,1]\n# try :\n# ticker = ticker.replace(' ','')\n#\n# time.sleep(1)\n# print('Crawling ' + ticker + ' stock prices \\n')\n#\n# if '.' in ticker:\n# tic = ticker.split(\":\")[1]\n# df, meta_data = ts.get_daily(symbol=tic, outputsize='full')\n#\n# else:\n# df, meta_data = ts.get_daily(symbol=ticker, outputsize='full')\n#\n# df.columns = ['open','high','low','close','volume']\n# df.reset_index(level=0, inplace=True)\n# compName = compName.replace(',', '')\n# compName = compName.replace('.','')\n# df.to_csv(\"D:\\\\Documents\\\\FVID_Glassdoor\\\\Stocks\\\\\" + compName + \".csv\",index=False, encoding='utf_8_sig')\n# print(ticker + ' stock price retrieval completed \\n\\n')\n# print('Number of crawled Stocks : ', i+1-len(exception))\n# except Exception:\n# print('Exception Exist for ' + compName + '\\n\\n')\n# exception.append(compName)\n# tickk.append(ticker)\n# pass\n# time.sleep(2)\n\n\n\n\n# for i in range(len(names)):\n# try :\n# compName = names.iloc[i,0]\n# compName = compName.replace('The ', '')\n# compName = compName.replace(' (US)','')\n# driver.get('https://www.google.com')\n# search=driver.find_element_by_name('q')\n# search.send_keys(compName + ' stock price')\n# search.send_keys(Keys.RETURN)\n# time.sleep(2)\n# if driver.find_element_by_xpath('//span[@class=\"HfMth\"]').text :\n# ticker = driver.find_element_by_xpath('//span[@class=\"HfMth\"]').text\n# else :\n# driver.get('https://www.google.com')\n# search=driver.find_element_by_name('q')\n# search.send_keys(compName + ' stock symbol')\n# search.send_keys(Keys.RETURN)\n# time.sleep(2)\n# ticker = driver.find_element_by_xpath('//span[@class=\"HfMth\"]').text\n# symbol.append(ticker)\n# print(ticker)\n# except Exception:\n# ticker = '-'\n# symbol.append(ticker)\n# 
print(ticker)\n\nprint(exception)\nprint(tickk)\nprint(len(tickk))\n","sub_path":"alphavan.py","file_name":"alphavan.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"553334214","text":"#!/usr/bin/python\n#-*- coding: utf-8 -*-\n\n# ======================================================================\n# Copyright 2016 Julien LE CLEACH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ======================================================================\n\nimport time\n\nfrom supervisor.childutils import get_asctime\nfrom supervisor.states import ProcessStates\n\nfrom supvisors.strategy import get_address\nfrom supvisors.ttypes import StartingStrategies, StartingFailureStrategies\nfrom supvisors.utils import supvisors_short_cuts\n\n\nclass Commander(object):\n \"\"\" Base class handling the starting / stopping of processes and applications.\n\n Attributes are:\n\n - planned_sequence: the applications to be commanded, as a dictionary of processes,\n grouped by application sequence order, application name and process sequence order,\n - planned_jobs: the current sequence of applications to be commanded,\n as a dictionary of processes, grouped by application name and process sequence order,\n - current_jobs: a dictionary of commanded processes, grouped by application name.\n \"\"\"\n\n def __init__(self, supvisors):\n \"\"\" Initialization of the attributes. \"\"\"\n # keep a reference of the Supvisors data\n self.supvisors = supvisors\n # shortcuts for readability\n supvisors_short_cuts(self, ['logger'])\n #attributes\n self.planned_sequence = {} # {application_sequence: {application_name: {process_sequence: [process]}}}\n self.planned_jobs = {} # {application_name: {process_sequence: [process]}}\n self.current_jobs = {} # {application_name: [process]}\n\n def in_progress(self):\n \"\"\" Return True if there are jobs planned or in progress. \"\"\"\n self.logger.debug('progress: planned_sequence={} planned_jobs={} current_jobs={}'.format(\n self.printable_planned_sequence(), self.printable_planned_jobs(), self.printable_current_jobs()))\n return len(self.planned_sequence) or len(self.planned_jobs) or len(self.current_jobs)\n\n def has_application(self, application_name):\n \"\"\" Return True if application is in jobs. \"\"\"\n # get all planned applications\n planned_applications = [app_name for jobs in self.planned_sequence.values()\n for app_name in jobs]\n # search for application name in internal structures\n return application_name in planned_applications \\\n or application_name in self.planned_jobs \\\n or application_name in self.current_jobs\n\n # log facilities\n def printable_planned_sequence(self):\n \"\"\" Simple form of planned_sequence, so that it can be printed. 
\"\"\"\n return {application_sequence:\n {application_name:\n {sequence: Commander.printable_process_list(processes) for sequence, processes in sequences.items()}\n for application_name, sequences in applications.items()}\n for application_sequence, applications in self.planned_sequence.items()}\n\n def printable_planned_jobs(self):\n \"\"\" Simple form of planned_jobs, so that it can be printed. \"\"\"\n return {application_name:\n {sequence: Commander.printable_process_list(processes) for sequence, processes in sequences.items()}\n for application_name, sequences in self.planned_jobs.items()}\n\n def printable_current_jobs(self):\n \"\"\" Simple form of current_jobs, so that it can be printed. \"\"\"\n return {application_name: Commander.printable_process_list(processes)\n for application_name, processes in self.current_jobs.items()}\n\n @staticmethod\n def printable_process_list(processes):\n \"\"\" Simple form of process_list, so that it can be printed. \"\"\"\n return [process.namespec() for process in processes]\n\n def initial_jobs(self):\n \"\"\" Initializes the planning of the jobs (start or stop). \"\"\"\n self.logger.debug('planned_sequence={}'.format(self.printable_planned_sequence()))\n # pop lower application group from planned_sequence\n if self.planned_sequence:\n self.planned_jobs = self.planned_sequence.pop(min(self.planned_sequence.keys()))\n self.logger.debug('planned_jobs={}'.format(self.printable_planned_jobs()))\n # iterate on copy to avoid problems with deletions\n for application_name in self.planned_jobs.keys()[:]:\n self.process_application_jobs(application_name)\n else:\n self.logger.debug('command completed')\n\n def process_application_jobs(self, application_name):\n \"\"\" Triggers the starting of a subset of the application. \"\"\"\n if application_name in self.planned_jobs:\n sequence = self.planned_jobs[application_name]\n self.current_jobs[application_name] = jobs = []\n # loop until there is something to do in sequence\n while sequence and not jobs and application_name in self.planned_jobs:\n # pop lower group from sequence\n group = sequence.pop(min(sequence.keys()))\n self.logger.debug('application {} - next group: {}'.format(application_name, self.printable_process_list(group)))\n for process in group:\n self.logger.trace('{} - state={}'.format(process.namespec(), process.state_string()))\n self.process_job(process, jobs)\n self.logger.debug('current_jobs={}'.format(self.printable_current_jobs()))\n # if nothing in progress when exiting the loop, delete application entry in current_jobs\n if not jobs:\n self.logger.debug('no more jobs for application {}'.format(application_name))\n self.current_jobs.pop(application_name, None)\n # clean application job if its sequence is empty\n if not sequence:\n self.logger.debug('all jobs planned for application {}'.format(application_name))\n self.planned_jobs.pop(application_name, None)\n else:\n self.logger.warn('application {} not found in jobs'.format(application_name))\n\n def process_job(self, process, jobs):\n \"\"\" Perform the action on process and push progeess in jobs list.\n Method must be implemented in subclasses. \"\"\"\n raise NotImplementedError\n\n\nclass Starter(Commander):\n \"\"\" Class handling the starting of processes and applications.\n\n Attributes are:\n - strategy: the starting strategy applied, defaulted to the value\n set in the Supervisor configuration file.\n \"\"\"\n\n def __init__(self, supvisors):\n \"\"\" Initialization of the attributes. 
\"\"\"\n Commander.__init__(self, supvisors)\n #attributes\n self._strategy = supvisors.options.starting_strategy\n\n @property\n def strategy(self):\n \"\"\" Property for the 'strategy' attribute.\n The setter is used to overload the default strategy (used in rpcinterface and web page). \"\"\"\n return self._strategy\n\n @strategy.setter\n def strategy(self, strategy):\n self.logger.info('start processes using strategy {}'.format(StartingStrategies._to_string(strategy)))\n self._strategy = strategy\n\n def abort(self):\n \"\"\" Abort all planned and current jobs. \"\"\"\n self.planned_sequence = {}\n self.planned_jobs = {}\n self.current_jobs = {}\n\n def start_applications(self):\n \"\"\" Plan and start the necessary jobs to start all the applications having a start_sequence.\n It uses the default strategy, as defined in the Supervisor configuration file. \"\"\"\n self.logger.info('start all applications')\n # internal call: default strategy always used\n self.strategy = self.supvisors.options.starting_strategy\n # starting initialization: push program list in todo list\n for application in self.supvisors.context.applications.values():\n # do not start an application that is not properly STOPPED\n if application.stopped() and application.rules.start_sequence > 0:\n self.store_application_start_sequence(application)\n # start work\n self.initial_jobs()\n\n def default_start_application(self, application):\n \"\"\" Plan and start the necessary jobs to start the application in parameter,\n with the default strategy. \"\"\"\n return self.start_application(self.supvisors.options.starting_strategy, application)\n\n def start_application(self, strategy, application):\n \"\"\" Plan and start the necessary jobs to start the application in parameter,\n with the strategy requested. \"\"\"\n self.logger.info('start application {}'.format(application.application_name))\n # called from rpcinterface: strategy is a user choice\n self.strategy = strategy\n # push program list in todo list and start work\n if application.stopped():\n self.store_application_start_sequence(application)\n self.logger.debug('planned_sequence={}'.format(self.printable_planned_sequence()))\n if self.planned_sequence:\n # add application immediately to planned jobs if something in list\n self.planned_jobs.update(self.planned_sequence.pop(min(self.planned_sequence.keys())))\n self.process_application_jobs(application.application_name)\n # return True when started\n return not self.in_progress()\n\n def default_start_process(self, process):\n \"\"\" Plan and start the necessary job to start the process in parameter,\n with the default strategy.\n Return False when starting not completed. \"\"\"\n return self.start_process(self.supvisors.options.starting_strategy,\n process)\n\n def start_process(self, strategy, process, extra_args=''):\n \"\"\" Plan and start the necessary job to start the process in parameter,\n with the strategy requested.\n Return False when starting not completed. 
\"\"\"\n self.logger.info('start process {}'.format(process.namespec()))\n # called from rpcinterface: strategy is a user choice\n self.strategy = strategy\n # store extra arguments to be passed to the command line\n process.extra_args = extra_args\n # WARN: when starting a single process (outside the scope of an\n # application starting), do NOT consider the 'wait_exit' rule\n process.ignore_wait_exit = True\n # push program list in todo list and start work\n job = self.current_jobs.setdefault(process.application_name, [])\n starting = self.process_job(process, job)\n # upon failure, remove inProgress entry if empty\n if not job:\n del self.current_jobs[process.application_name]\n # return True when starting\n return starting\n\n def check_starting(self):\n \"\"\" Check the progress of the application starting. \"\"\"\n self.logger.debug('starting progress: planned_sequence={} planned_jobs={} current_jobs={}'.format(\n self.printable_planned_sequence(), self.printable_planned_jobs(), self.printable_current_jobs()))\n # once the start_process has been called, a STARTING event is expected in less than 5 seconds\n now = time.time()\n processes = [process for process_list in self.current_jobs.values()\n for process in process_list]\n self.logger.trace('now={} checking processes={}'.format(now,\n [(process.process_name, process.state, process.request_time, process.last_event_time)\n for process in processes]))\n for process in processes:\n # depending on ini file, it may take a while before the process enters in RUNNING state\n # so just test that is in not in a STOPPED-like state 5 seconds after request_time\n if process.stopped() and max(process.last_event_time, process.request_time) + 5 < now:\n # generate a FATAL event for this process\n self.force_process_fatal(process.namespec(), 'Still stopped 5 seconds after start request')\n # return True when starting is completed\n return not self.in_progress()\n\n def on_event(self, process):\n \"\"\" Triggers the following of the start sequencing, depending on the new process status. \"\"\"\n try:\n # first check if event is in the sequence logic,\n # i.e. it corresponds to a process in current jobs\n jobs = self.current_jobs[process.application_name]\n assert(process in jobs)\n self.on_event_in_sequence(process, jobs)\n except (KeyError, AssertionError):\n # otherwise, check if event impacts the starting sequence\n self.on_event_out_of_sequence(process)\n\n def on_event_in_sequence(self, process, jobs):\n \"\"\" Manages the impact of an event that is part of the starting sequence. \"\"\"\n if process.state in [ProcessStates.STOPPED, ProcessStates.STOPPING, ProcessStates.UNKNOWN]:\n # unexpected event in a starting phase: someone has requested to stop the process as it is starting\n # remove from inProgress\n process.ignore_wait_exit = False\n jobs.remove(process)\n # decide to continue starting or not\n self.process_failure(process)\n elif process.state == ProcessStates.STARTING:\n # on the way\n pass\n elif process.state == ProcessStates.RUNNING:\n # if not exit expected, job done. 
otherwise, wait\n if not process.rules.wait_exit or process.ignore_wait_exit:\n process.ignore_wait_exit = False\n jobs.remove(process)\n elif process.state == ProcessStates.BACKOFF:\n # something wrong happened, just wait\n self.logger.warn('problems detected with {}'.format(process.namespec()))\n elif process.state == ProcessStates.EXITED:\n # remove from inProgress\n process.ignore_wait_exit = False\n jobs.remove(process)\n # an EXITED process is accepted if wait_exit is set\n if process.rules.wait_exit and process.expected_exit:\n self.logger.info('expected exit for {}'.format(process.namespec()))\n else:\n self.process_failure(process)\n elif process.state == ProcessStates.FATAL:\n # remove from inProgress\n process.ignore_wait_exit = False\n jobs.remove(process)\n # decide to continue starting or not\n self.process_failure(process)\n # check if there are remaining jobs in progress for this application\n if not jobs:\n # remove application entry from current_jobs\n del self.current_jobs[process.application_name]\n # trigger next job for application\n if process.application_name in self.planned_jobs:\n self.process_application_jobs(process.application_name)\n else:\n self.logger.info('starting completed for application {}'.\n format(process.application_name))\n # check if there are planned jobs\n if not self.planned_jobs:\n # trigger next sequence of applications\n self.initial_jobs()\n\n def on_event_out_of_sequence(self, process):\n \"\"\" Manages the impact of a crash event that is out of the starting sequence.\n Note: Keeping in mind the possible origins of the event:\n * a request performed by this Starter,\n * a request performed directly on any Supervisor (local or remote),\n * a request performed on a remote Supvisors,\n let's consider the following cases:\n 1) The application is in the planned sequence, or process is in the planned jobs.\n => do nothing, give a chance to this Starter.\n 2) The process is NOT in the application planned jobs.\n The process was likely started previously in the sequence of this Starter,\n and it crashed after its RUNNING state but before the application is fully started.\n => apply starting failure strategy through basic process_failure\n 3) The application is NOT handled in this Starter\n => running failure strategy could be applied outside of here\n \"\"\"\n # find the conditions of case 2\n if process.crashed() and process.application_name in self.planned_jobs:\n planned_application_jobs = self.planned_jobs[process.application_name]\n planned_process_jobs = [proc for proc_list in planned_application_jobs.values()\n for proc in proc_list]\n if process not in planned_process_jobs:\n self.process_failure(process)\n\n def store_application_start_sequence(self, application):\n \"\"\" Copy the start sequence and remove programs that are not meant to be\n started automatically, i.e. their start_sequence is 0. \"\"\"\n application_sequence = application.start_sequence.copy()\n application_sequence.pop(0, None)\n if len(application_sequence) > 0:\n sequence = self.planned_sequence.setdefault(\n application.rules.start_sequence, {})\n sequence[application.application_name] = application_sequence\n\n def process_job(self, process, jobs):\n \"\"\" Start the process on the relevant address.\n Return True if process is starting. 
\"\"\"\n reset_flag = True\n # process must be stopped\n if process.stopped():\n namespec = process.namespec()\n address = get_address(self.supvisors, self.strategy,\n process.rules.addresses, process.rules.expected_loading)\n if address:\n self.logger.info('try to start {} at address={}'.format(\n namespec, address))\n # use asynchronous xml rpc to start program\n self.supvisors.zmq.pusher.send_start_process(address,\n namespec, process.extra_args)\n # push to jobs and timestamp process\n process.request_time = time.time()\n self.logger.debug('{} requested to start at {}'.format(\n namespec, get_asctime(process.request_time)))\n jobs.append(process)\n reset_flag = False\n # reset extra arguments\n process.extra_args = ''\n else:\n self.logger.warn('no resource available to start {}'.format(\n namespec))\n self.force_process_fatal(namespec, 'no resource available')\n # due to failure, reset ignore_wait_exit flag\n if reset_flag:\n process.ignore_wait_exit = False\n # return True when process is starting\n return not reset_flag\n\n def process_failure(self, process):\n \"\"\" Updates the start sequence when a process could not be started. \"\"\"\n application_name = process.application_name\n # impact of failure on application starting\n if process.rules.required:\n self.logger.warn('starting failed for required {}'.format(\n process.process_name))\n # get starting failure strategy of related application\n application = self.supvisors.context.applications[application_name]\n failure_strategy = application.rules.starting_failure_strategy\n # apply strategy\n if failure_strategy == StartingFailureStrategies.ABORT:\n self.logger.error('abort starting of application {}'.format(\n application_name))\n # remove failed application from starting\n # do not remove application from current_jobs as requests\n # have already been sent\n self.planned_jobs.pop(application_name, None)\n elif failure_strategy == StartingFailureStrategies.STOP:\n self.logger.error('stop application {}'.format(application_name))\n self.planned_jobs.pop(application_name, None)\n self.supvisors.stopper.stop_application(application)\n else:\n self.logger.warn('continue starting of application {}'.format(\n application_name))\n else:\n self.logger.warn('starting failed for optional {}'.format(\n process.process_name))\n self.logger.warn('continue starting of application {}'.format(\n application_name))\n\n def force_process_fatal(self, namespec, reason):\n \"\"\" Publish the process state as FATAL to all Supvisors instances. \"\"\"\n self.logger.warn('force {} state to FATAL'.format(namespec))\n try:\n # this call updates the Supervisor data model\n self.supvisors.info_source.force_process_fatal(namespec, reason)\n except KeyError:\n self.logger.error('process {} unknown to this Supervisor.'.format(\n namespec))\n # the Supvisors user is not forced to use the same process\n # configuration on all machines,\n # although it is strongly recommended to avoid troubles.\n # => publish directly a fake process event to all Supvisors instances\n self.supvisors.listener.force_process_fatal(namespec)\n\n\nclass Stopper(Commander):\n \"\"\" Class handling the stopping of processes and applications. \"\"\"\n\n def stop_applications(self):\n \"\"\" Plan and start the necessary jobs to stop all the applications\n having a stop_sequence. 
\"\"\"\n self.logger.info('stop all applications')\n # stopping initialization: push program list in todo list\n for application in self.supvisors.context.applications.values():\n # do not stop an application that is not running\n if application.running() and application.rules.stop_sequence >= 0:\n self.store_application_stop_sequence(application)\n # start work\n self.initial_jobs()\n\n def stop_application(self, application):\n \"\"\" Plan and start the necessary jobs to stop the application in\n parameter. \"\"\"\n self.logger.info('stop application {}'.format(\n application.application_name))\n # push program list in todo list and start work\n if application.running():\n self.store_application_stop_sequence(application)\n self.logger.debug('planned_sequence={}'.format(\n self.printable_planned_sequence()))\n # add application immediately to planned jobs\n self.planned_jobs.update(self.planned_sequence.pop(\n min(self.planned_sequence.keys())))\n self.process_application_jobs(application.application_name)\n # return True when stopped\n return not self.in_progress()\n\n def stop_process(self, process):\n \"\"\" Plan and start the necessary job to stop the process in parameter. \"\"\"\n self.logger.info('stop process {}'.format(process.namespec()))\n # push program list in todo list and start work\n job = self.current_jobs.setdefault(process.application_name, [])\n self.process_job(process, job)\n # upon failure, remove inProgress entry if empty\n if not job:\n del self.current_jobs[process.application_name]\n # return True when stopped\n return not self.in_progress()\n\n def store_application_stop_sequence(self, application):\n \"\"\" Schedules the application processes to stop. \"\"\"\n if application.stop_sequence:\n sequence = self.planned_sequence.setdefault(\n application.rules.stop_sequence, {})\n sequence[application.application_name] = application.stop_sequence.copy()\n\n def process_job(self, process, jobs):\n \"\"\" Stops the process where it is running. \"\"\"\n if process.running():\n # use asynchronous xml rpc to stop program\n for address in process.addresses:\n self.logger.info('stopping process {} on {}'.format(\n process.namespec(), address))\n self.supvisors.zmq.pusher.send_stop_process(address,\n process.namespec())\n # push to jobs and timestamp process\n process.request_time = time.time()\n self.logger.debug('{} requested to stop at {}'.format(\n process.namespec(), get_asctime(process.request_time)))\n jobs.append(process)\n\n def check_stopping(self):\n \"\"\" Check the progress of the application stopping. 
\"\"\"\n self.logger.debug('stopping progress: planned_sequence={} '\\\n 'planned_jobs={} current_jobs={}'.format(\n self.printable_planned_sequence(),\n self.printable_planned_jobs(),\n self.printable_current_jobs()))\n # once the stop_process has been called, a STOPPING event is expected\n # in less than 5 seconds\n now = time.time()\n processes= [process\n for process_list in self.current_jobs.values()\n for process in process_list]\n self.logger.trace('now={} checking processes={}'.format(now,\n [(process.process_name, process.state,\n process.request_time, process.last_event_time)\n for process in processes]))\n for process in processes:\n # depending on ini file, it may take a while before the process\n # enters in STOPPED state\n # so just test that is in not in a RUNNING-like state 5 seconds\n # after request_time\n if process.running() and max(process.last_event_time,\n process.request_time) + 5 < now:\n self.force_process_unknown(process.namespec(),\n 'Still running 5 seconds after stop request')\n # return True when starting is completed\n return not self.in_progress()\n\n def on_event(self, process):\n \"\"\" Triggers the following of the stop sequencing, depending on\n the new process status. \"\"\"\n # check if process event has an impact on stopping in progress\n if process.application_name in self.current_jobs:\n jobs = self.current_jobs[process.application_name]\n self.logger.debug('jobs={}'.format(self.printable_current_jobs()))\n if process in jobs:\n if process.running():\n # several cases:\n # 1) expected upon conciliation of a conflicting process\n # 2) concurrent stopping / starting\n self.logger.warn('{} still running when stopping'.format(\n process.namespec()))\n elif process.stopped():\n # goal reached, whatever the state\n jobs.remove(process)\n # else STOPPING, on the way\n # check if there are remaining jobs in progress for this application\n if not jobs:\n # remove application entry from current_jobs\n del self.current_jobs[process.application_name]\n # trigger next job for application\n if process.application_name in self.planned_jobs:\n self.process_application_jobs(process.application_name)\n else:\n self.logger.info('stopping completed for application '\\\n '{}'.format(process.application_name))\n # check if there are planned jobs\n if not self.planned_jobs:\n # trigger next sequence of applications\n self.initial_jobs()\n\n def force_process_unknown(self, namespec, reason):\n \"\"\" Updates the stop sequencing when a process could not be stopped. \"\"\"\n # publish the process state as UNKNOWN to all Supvisors instances\n self.logger.warn('force {} state to UNKNOWN'.format(namespec))\n try:\n self.supvisors.info_source.force_process_unknown(namespec, reason)\n except KeyError:\n self.logger.error('impossible to force {} state to UNKNOWN. 
'\\\n 'process unknown in this Supervisor'.format(namespec))\n # the Supvisors user is not forced to use the same process\n # configuration on all machines,\n # although it is strongly recommended to avoid troubles.\n # so, publish directly a fake process event to all instances\n self.supvisors.listener.force_process_unknown(namespec)\n","sub_path":"supvisors/commander.py","file_name":"commander.py","file_ext":"py","file_size_in_byte":29339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"54335655","text":"import socket \nimport time\n\ns = socket.socket()\ns.bind(('', 7777)) \ns.listen(1) \nprint('Listen...')\n\n\ndef create_socket():\n print('Wait for accept...')\n conn, addr = s.accept()\n print('Accepted')\n while True:\n data = conn.recv(1024)\n print(data)\n if not data:\n break\n print('Disconnected...')\n create_socket()\n\ncreate_socket()\n\n","sub_path":"osvstar.py","file_name":"osvstar.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"271461738","text":"from telethon import TelegramClient\nfrom telethon.tl.functions.messages import GetHistoryRequest\nfrom telethon.tl.functions.messages import GetDialogsRequest\nimport time\nfrom telethon.tl.types.messages import ChannelMessages\nimport os\nfrom getpass import getpass\n\nfrom telethon import ConnectionMode\nfrom telethon.errors import SessionPasswordNeededError\nfrom telethon.tl.types import UpdateShortChatMessage, UpdateShortMessage\nfrom telethon.utils import get_display_name\n\n# (1) Use your own values here\napi_id = 152243\napi_hash = '77e78ccb9fb1b9118051e523f77d9d94'\nphone = '+989302795635'\nclient = TelegramClient('strix', api_id, api_hash)\nclient.connect()\nif not client.is_user_authorized():\n client.send_code_request(phone)\n client.sign_in(phone, input('Enter the code: '))\n#############################################################################\n#client.send_message('straxico', 'Hello! Talking to you from Telethon')\n#client.send_file('straxico', 'minerva.jpg')\n#########################################################################\ndialog_count=5\nlimit=10\ndialogs, entities = client.get_dialogs(dialog_count)\nprint('Dialogs window')\nfor i, entity in enumerate(entities, start=1):\n print('{}. 
{}'.format(i, get_display_name(entity)))\n#################################################################\nms = [[0 for j in range(limit+1)] for i in range(dialog_count)]\ndef getdata():\n dialogs, entities = client.get_dialogs(dialog_count)\n for i, entity in enumerate(entities, start=0):\n entity = entities[i]\n total_count, messages, senders = client.get_message_history(entity,limit)\n k=0\n ms[i][k]=get_display_name(entity)\n for j in range(len(messages)):\n msg=messages[j]\n content = msg.message\n if content and len(content)>=5: # media messages have no text (message is None)\n k=k+1 \n ms[i][k]=content\n return ms\n################################################\n\nz=1\ns1=getdata()\nwhile z>0:\n time.sleep(10)\n print(z)\n z=z+1\n s2=getdata()\n for i in range (0,4):\n for j in range (2,10):\n client.send_message('straxico' ,str(s2[i][j])+ \" @gutwet \"+str(i)+'and'+str(j))\n print(i,j)\n time.sleep(2)\n\n#######################################################\n# for i in range (0,4):\n# for j in range (2,10):\n# if s1[i][1]==s2[i][j]:\n# for c in range (1,j):\n# client.send_message('gutwet' ,s2[i][c]+ \" @gutwet \")\n# print(s2[i][c])\n# time.sleep(2)\n# s1=s2\n\n\n###############################################\n#for i in mezz:\n# client.send_message('straxico' ,i+ \" @gutwet \")\n# time.sleep(1.5)\n ###############################################\n## Now you have access to the first 20 messages\n#messages = result.messages\n\n","sub_path":"telethon.py","file_name":"telethon.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"329690355","text":"from flask import Flask, render_template, request\nfrom argparse import ArgumentParser\nimport os\n\nparser = ArgumentParser(description=\"Runs an OrgSlides instance\")\n\nparser.add_argument(\"-d\", \"--directory\", default=\"slides\",\n help=\"Directory to load and save the files from\")\n\nargs = parser.parse_args()\n\nroot = os.path.abspath(os.path.dirname(__file__))\napp = Flask(__name__)\n\n@app.route(\"/\")\n@app.route(\"/list\")\ndef index():\n files = []\n for dirpath, dirnames, filenames in os.walk(args.directory):\n for filename in filenames:\n path = os.path.join(dirpath, filename)\n path = os.path.relpath(path, args.directory)\n files.append(path)\n files.sort()\n return render_template(\"list.html\", files=files)\n\n# <path:filename> is assumed in the routes below so that Flask passes the filename to the views\n# and files in subdirectories (as listed by index) still resolve\n@app.route(\"/edit/<path:filename>\", methods=[\"GET\"])\ndef edit(filename):\n return render_template(\"edit.html\", filename=filename)\n\n@app.route(\"/get/<path:filename>\", methods=[\"GET\"])\ndef get(filename):\n with open(os.path.join(args.directory, filename), \"r\") as f:\n content = f.readlines()\n return \"\".join(content)\n\n@app.route(\"/save/<path:filename>\", methods=[\"POST\"])\ndef save(filename):\n with open(os.path.join(args.directory, filename), \"w\") as f:\n f.write(request.form[\"content\"])\n return \"ok\"\n\n@app.route(\"/published/<path:filename>\")\ndef preview(filename):\n return render_template(\"slides.html\", filename=filename)\n\nif __name__ == \"__main__\":\n app.run(port=5000)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"336929133","text":"from dagster_graphql.test.utils import execute_dagster_graphql\nfrom .execution_queries import START_PIPELINE_EXECUTION_QUERY, SUBSCRIPTION_QUERY\nfrom .setup import define_context\n\n\ndef sync_execute_get_payload(variables, raise_on_error=True, context=None):\n if not context:\n context = 
define_context(raise_on_error=raise_on_error)\n\n result = execute_dagster_graphql(context, START_PIPELINE_EXECUTION_QUERY, variables=variables)\n\n assert result.data\n\n if result.data['startPipelineExecution']['__typename'] != 'StartPipelineExecutionSuccess':\n raise Exception(result.data)\n run_id = result.data['startPipelineExecution']['run']['runId']\n\n subscription = execute_dagster_graphql(context, SUBSCRIPTION_QUERY, variables={'runId': run_id})\n\n subscribe_results = []\n subscription.subscribe(subscribe_results.append)\n\n assert len(subscribe_results) == 1\n subscribe_result = subscribe_results[0]\n assert not subscribe_result.errors\n assert subscribe_result.data\n return subscribe_result.data\n\n\ndef sync_execute_get_run_log_data(variables, raise_on_error=True, context=None):\n payload_data = sync_execute_get_payload(\n variables, raise_on_error=raise_on_error, context=context\n )\n assert payload_data['pipelineRunLogs']\n return payload_data['pipelineRunLogs']\n\n\ndef sync_execute_get_events(variables, context=None):\n return sync_execute_get_run_log_data(variables, context=context)['messages']\n","sub_path":"python_modules/dagster-graphql/dagster_graphql_tests/graphql/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"522467691","text":"\"\"\"Erlang External Term Format serializer/deserializer\"\"\"\nimport struct\nimport sys\nimport six\n\nfrom erlastic.codec import ErlangTermDecoder, ErlangTermEncoder\nfrom erlastic.types import *\n\nencode = ErlangTermEncoder().encode\ndecode = ErlangTermDecoder().decode\n\nif six.PY3:\n stdread = sys.stdin.buffer.read\n stdwrite = sys.stdout.buffer.write\nelse:\n stdread = sys.stdin.read\n stdwrite = sys.stdout.write\n\n\ndef mailbox_gen():\n while True:\n len_bin = stdread(4)\n if len(len_bin) != 4:\n return\n (length,) = struct.unpack('!I', len_bin)\n yield decode(stdread(length))\n\n\ndef port_gen():\n while True:\n term = encode((yield))\n stdwrite(struct.pack('!I', len(term)))\n stdwrite(term)\n sys.stdout.flush()\n\ndef port_connection():\n port = port_gen()\n next(port)\n return mailbox_gen(), port\n","sub_path":"erlastic/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"450760141","text":"f = open(\"test1.txt\", 'r')\nl = [line.strip() for line in f]\nprint(l)\nz = [str(l)]\nx = open('test2.txt', 'w')\nfor y in z:\n x.write(y)\n\nf.close()\nx.close()","sub_path":"Course_Python/file/hello2.py","file_name":"hello2.py","file_ext":"py","file_size_in_byte":156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"343135285","text":"from xml.etree.ElementTree import Element, tostring\n\nclass covid19assessment:\n \n def __init__(self, currently_infected, days):\n self.days = days\n self.currently_infected = currently_infected\n self.infections_by_requested_time = int(self.currently_infected*(2**(self.days//3)))\n \n\n def severe_cases_by_requested_time(self):\n self.severe_cases_by_requested_time = int(self.infections_by_requested_time*0.15)\n return self.severe_cases_by_requested_time\n\n def hospital_beds_by_requested_time(self, total_hospital_beds):\n percentage = 65\n self.available_hospital_beds = int(total_hospital_beds*((100-percentage)/100))\n return self.available_hospital_beds\n \n def 
cases_for_icu_by_requested_time(self):\n self.cases_for_icu_by_requested_time = int(self.infections_by_requested_time*0.05)\n return self.cases_for_icu_by_requested_time\n \n def cases_for_ventilators_by_requested_time(self):\n self.cases_for_ventilators_by_requested_time = int(self.infections_by_requested_time*0.02)\n return self.cases_for_ventilators_by_requested_time\n \n def dollars_in_flight(self, avg_daily_income_population, avg_daily_income_in_usd):\n self.dollars_in_flight = self.infections_by_requested_time*avg_daily_income_population*avg_daily_income_in_usd*self.days\n return self.dollars_in_flight\n\ndef estimator(data):\n \"\"\"\n This function computes the estimation for impact and severeImpact\n \"\"\"\n \n reported_cases = data[\"reportedCases\"]\n total_hospital_beds = data[\"totalHospitalBeds\"]\n avg_daily_income_in_usd = data[\"region\"][\"avgDailyIncomeInUSD\"]\n avg_daily_income_population = data[\"region\"][\"avgDailyIncomePopulation\"]\n if data[\"periodType\"] == \"weeks\":\n days = data[\"timeToElapse\"] * 7 # a week is 7 days\n elif data[\"periodType\"] == \"month\":\n days = data[\"timeToElapse\"] * 30\n else:\n days = data[\"timeToElapse\"]\n\n \n impact_currently_infected = reported_cases*10\n severe_currently_infected = reported_cases*50\n \n\n impact = {}\n severeImpact = {}\n result = {}\n \n estimate = covid19assessment(impact_currently_infected, days)\n impact[\"currentlyInfected\"] = impact_currently_infected\n impact[\"severeCasesByRequestedTime\"] = estimate.severe_cases_by_requested_time()\n impact[\"totalHospitalBeds\"] = estimate.hospital_beds_by_requested_time(total_hospital_beds)\n impact[\"casesForICUByRequestedTime\"] = estimate.cases_for_icu_by_requested_time()\n impact[\"casesForVentilatorsByRequestedTime\"] = estimate.cases_for_ventilators_by_requested_time()\n impact[\"dollarsInFlight\"] = estimate.dollars_in_flight(avg_daily_income_population, avg_daily_income_in_usd)\n\n severe_estimate = covid19assessment(severe_currently_infected, days)\n severeImpact[\"currentlyInfected\"] = severe_currently_infected\n severeImpact[\"severeCasesByRequestedTime\"] = severe_estimate.severe_cases_by_requested_time()\n severeImpact[\"totalHospitalBeds\"] = severe_estimate.hospital_beds_by_requested_time(total_hospital_beds)\n severeImpact[\"casesForICUByRequestedTime\"] = severe_estimate.cases_for_icu_by_requested_time()\n severeImpact[\"casesForVentilatorsByRequestedTime\"] = severe_estimate.cases_for_ventilators_by_requested_time()\n severeImpact[\"dollarsInFlight\"] = severe_estimate.dollars_in_flight(avg_daily_income_population, avg_daily_income_in_usd)\n\n result[\"data\"] = data\n result[\"impact\"] = impact\n result[\"severeImpact\"] = severeImpact\n return result\n\n \ndef dict_to_xml(tag, d):\n \"\"\"\n Turn a simple dict of key/value pairs into XML\n \"\"\"\n elem = Element(tag)\n for key, val in d.items():\n child = Element(key)\n child.text = str(val)\n elem.append(child)\n return elem\n\n\n\n\n \n","sub_path":"covid19/estimator.py","file_name":"estimator.py","file_ext":"py","file_size_in_byte":3803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"94704171","text":"import torch\nfrom torch import nn, distributed\nfrom torch.nn import functional as F\n\nfrom homura.utils import is_faiss_available, is_distributed, is_horovod_available\nfrom .ema import exponential_moving_average_\nfrom .functional import custom_straight_through_estimator, k_nearest_neighbor as knn\n\n\nclass VQModule(nn.Module):\n \"\"\" 
Vector Quantization module used in VQ-VAE [van den Oord et al. 17]\n\n \"\"\"\n\n def __init__(self,\n emb_dim: int,\n dict_size: int,\n momentum: float = 0.99,\n epsilon: float = 1e-5,\n knn_backend=\"faiss\" if is_faiss_available() else \"torch\",\n metric: str = 'l2'):\n\n super(VQModule, self).__init__()\n\n self.emb_dim = emb_dim\n self.dict_size = dict_size\n self.epsilon = epsilon\n self._knn_backend = knn_backend\n self.metric = metric\n self.frozen = False\n # this handles the issue with DataParallel\n\n assert 0 <= momentum <= 1\n self.gamma = momentum\n\n # embed: DxC (emb_dim==C)\n embed = F.normalize(torch.randn(dict_size, emb_dim), dim=1, p=2)\n self.register_buffer(\"track_num\", torch.zeros(dict_size, 1))\n self.register_buffer(\"track_enc\", embed.clone())\n self.register_buffer(\"embed\", embed)\n self._first_time = True\n\n self._distributed_update()\n\n def forward(self,\n input: torch.Tensor) -> (torch.Tensor, torch.Tensor):\n # returns reconstructed inputs and corresponding reconstructed loss\n\n distance, ids, vqs = self._forward(*self.flatten(input))\n return custom_straight_through_estimator(vqs, input), F.mse_loss(vqs.detach(), input), ids\n\n @torch.no_grad()\n def _forward(self,\n flatten: torch.Tensor,\n shape: tuple) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n\n # distance: (BWH)x1, ids: (BWH)x1\n distance, ids = knn(self.embed, flatten, 1, self.metric, backend=self._knn_backend)\n\n vqs = self.lookup(ids)\n\n if self.training and not self.frozen:\n self.ema_update(flatten, ids)\n\n if len(shape) == 4:\n # vqs: (BWH)xC -> BxCxHxW\n b, c, h, w = shape\n vqs = vqs.view(b, w, h, c).transpose(1, -1)\n ids = ids.view(b, w, h).transpose(1, -1)\n else:\n # vqs\n vqs = vqs.squeeze()\n ids = ids.squeeze()\n\n return distance, ids, vqs\n\n def flatten(self,\n input: torch.Tensor):\n if input.dim() == 2:\n # BxC\n shape = input.size()\n flatten = input\n elif input.dim() == 4:\n # input: BxCxHxW -> flatten: (BWH)xC\n shape = input.size()\n flatten = input.transpose(1, -1).reshape(-1, self.emb_dim)\n else:\n raise NotImplementedError\n return flatten, shape\n\n @torch.no_grad()\n def ema_update(self,\n flatten: torch.Tensor,\n ids: torch.Tensor) -> None:\n # flatten: (BHW)xC, ids: BxHxW -> (BHW)x1\n ids = ids.view(-1, 1)\n # onehot_ids: (BHW)xD\n onehot_ids = ids.new_zeros([ids.size(0), self.dict_size], dtype=torch.float)\n onehot_ids.scatter_(1, ids, 1)\n # (BHW)xD -> 1xD -> Dx1\n if self._first_time:\n self.track_num.copy_(onehot_ids.sum(dim=0).view_as(self.track_num))\n self.track_enc.copy_(onehot_ids.t().matmul(flatten))\n self._first_time = False\n else:\n exponential_moving_average_(self.track_num,\n onehot_ids.sum(dim=0).view_as(self.track_num),\n self.gamma)\n # Dx(BHW) x (BHW)xC -> DxC\n exponential_moving_average_(self.track_enc,\n onehot_ids.t().matmul(flatten),\n self.gamma)\n\n # following sonnet's implementation\n factor = 1 + (self.epsilon * self.dict_size) / self.track_num.sum()\n self.embed = self.track_enc * factor / (self.track_num + self.epsilon)\n self._distributed_update()\n\n def _distributed_update(self):\n if not is_distributed():\n return\n\n if is_horovod_available():\n import horovod.torch as hvd\n\n hvd.allreduce(self.track_num)\n hvd.allreduce(self.track_enc)\n hvd.allreduce(self.embed)\n else:\n distributed.all_reduce(self.track_num, op=distributed.ReduceOp.SUM)\n distributed.all_reduce(self.track_enc, op=distributed.ReduceOp.SUM)\n distributed.all_reduce(self.embed, op=distributed.ReduceOp.SUM)\n ws = distributed.get_world_size()\n 
self.track_num /= ws\n self.track_enc /= ws\n self.embed /= ws\n\n def lookup(self, ids: torch.Tensor):\n return F.embedding(ids, self.embed)\n\n def __repr__(self):\n return f\"VQModule(emb_dim={self.emb_dim}, dict_size={self.dict_size})\"\n","sub_path":"homura/modules/vq.py","file_name":"vq.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"316743173","text":"# import logging\n\nfrom django.contrib.auth.decorators import permission_required\nfrom django.contrib.auth.models import Permission, Group\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom app.models import MyUser\n\n# logger = logging.getLogger('console')\n\n\ndef create_user(request):\n if request.method == 'GET':\n MyUser.objects.create_user(username='admin', password='1234')\n\n return HttpResponse('user created successfully')\n\n\ndef add_user_permission(request):\n if request.method == 'GET':\n # grant the user named admin the permission to change usernames\n # fetch the user named admin\n user = MyUser.objects.filter(username='admin').first()\n # fetch the change-username permission\n per = Permission.objects.filter(codename='change_myuser_username').first()\n # add the permission\n # user.user_permissions.add(per)\n\n # remove the permission\n # user.user_permissions.remove(per)\n\n # clear all permissions (clear is a method, so it must be called)\n user.user_permissions.clear()\n\n return HttpResponse('user permission added successfully')\n\n\ndef add_group_permission(request):\n if request.method == 'GET':\n # create the review group ('审核组') and assign editing permissions\n group = Group.objects.filter(name='审核组').first()\n if group:\n per_list = ['change_myuser', 'delete_myuser', 'change_myuser_username', 'change_myuser_password']\n # fetch the four editing permissions\n perms = Permission.objects.filter(codename__in=per_list)\n for per in perms:\n # link the group to the permission\n group.permissions.add(per)\n # unlink the group from the permission\n # group.permissions.remove(per)\n return HttpResponse('group-permission relations added')\n else:\n # the group does not exist yet, so create it\n Group.objects.create(name='审核组')\n return HttpResponse('the review group was not created yet, please create it first')\n\n\ndef add_user_group(request):\n if request.method == 'GET':\n # assign the review group to the admin user\n # fetch the matching user and group first\n user = MyUser.objects.filter(username='admin').first()\n group = Group.objects.filter(name='审核组').first()\n\n # assign the review group to the admin user\n user.groups.add(group)\n\n return HttpResponse('group assigned successfully')\n\n# query a user's permissions\ndef user_permission(request):\n if request.method == 'GET':\n user = MyUser.objects.filter(username='admin').first()\n # look up this user's permissions\n\n # 1. query directly through the permissions relation\n per = user.user_permissions.all().values('codename')\n\n # 2. query through the user's groups\n # method 1:\n perms = user.groups.first().permissions.all().values('codename')\n\n # method 2: get the permission set\n user.get_group_permissions()\n\n # get all of the user's permissions\n user.get_all_permissions()\n\n return HttpResponse(' ')\n\n\n@permission_required('app.change_myuser_username')\ndef index(request):\n if request.method == 'GET':\n # logging.info('index view')\n # change_myuser_username\n user = request.user\n # return HttpResponse('index page: requires the change-username permission to access')\n return render(request, 'index.html')","sub_path":"5.django/day09/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"60120186","text":"import random\n\nclass Edge:\n def __init__(self, destination):\n self.destination = destination\n\n\nclass Vertex:\n def __init__(self, value, color, **pos):\n self.value = value\n self.color = color\n self.pos = pos\n self.edges = []\n\n\nclass Graph:\n def __init__(self):\n self.vertexes = []\n\n def debug_create_test_data(self):\n vertex_1 = Vertex('v1', color='rgb(255, 0, 0)', x=40, y=40)\n vertex_2 = Vertex('v2', 
color='blue', x=140, y=140)\n vertex_3 = Vertex('v3', color='green', x=300, y=400)\n vertex_4 = Vertex('v4', color='gold', x=400, y=200)\n vertex_5 = Vertex('v5', color='purple', x=100, y=400)\n\n vertex_1.edges.append(Edge(vertex_2))\n vertex_1.edges.append(Edge(vertex_3))\n\n vertex_2.edges.append(Edge(vertex_1))\n vertex_2.edges.append(Edge(vertex_4))\n\n vertex_4.edges.append(Edge(vertex_1))\n vertex_5.edges.append(Edge(vertex_4))\n\n vertex_list = [vertex_1, vertex_2, vertex_3, vertex_4, vertex_5]\n self.vertexes.extend(vertex_list)\n\n def bfs(self, start):\n random_color = f\"rgb({random.randint(0, 255)}, {random.randint(0, 255)}, {random.randint(0, 255)})\"\n\n queue = []\n found = []\n\n queue.append(start)\n found.append(start)\n\n start.color = random_color\n\n while len(queue) > 0:\n v = queue[0]\n for edge in v.edges:\n if edge.destination not in found:\n found.append(edge.destination)\n queue.append(edge.destination)\n edge.destination.color = random_color\n queue.pop(0)\n\n return found\n\n","sub_path":"src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"263359490","text":"def selection_sort(l):\n \"\"\"\n @param {list} l - the list to sort\n @return {tuple(list, number)} - Tuple(sorted list, number of iterations)\n \"\"\"\n sweeps = 0\n for i in range(len(l)):\n sweeps += 1\n min_val_index = i\n for j in range(i + 1, len(l)):\n if l[j] < l[min_val_index]:\n min_val_index = j\n if i != min_val_index:\n swap(l, i, min_val_index)\n return (l, sweeps)\n\n\ndef swap(l, index_a, index_b):\n \"\"\"\n Swaps two indexes in a list\n @param {list} l - the list\n @param {number} index_a - The first index\n @param {number} index_b - The second index\n \"\"\"\n tmp = l[index_a]\n l[index_a] = l[index_b]\n l[index_b] = tmp\n","sub_path":"sorting/Selection_Sort/python/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"120368924","text":"#\n# @lc app=leetcode id=33 lang=python\n#\n# [33] Search in Rotated Sorted Array\n#\n# https://leetcode.com/problems/search-in-rotated-sorted-array/description/\n#\n# algorithms\n# Medium (32.36%)\n# Total Accepted: 337.5K\n# Total Submissions: 1M\n# Testcase Example: '[4,5,6,7,0,1,2]\\n0'\n#\n# Suppose an array sorted in ascending order is rotated at some pivot unknown\n# to you beforehand.\n# \n# (i.e., [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2]).\n# \n# You are given a target value to search. 
If found in the array return its\n# index, otherwise return -1.\n# \n# You may assume no duplicate exists in the array.\n# \n# Your algorithm's runtime complexity must be in the order of O(log n).\n# \n# Example 1:\n# \n# \n# Input: nums = [4,5,6,7,0,1,2], target = 0\n# Output: 4\n# \n# \n# Example 2:\n# \n# \n# Input: nums = [4,5,6,7,0,1,2], target = 3\n# Output: -1\n# \n#\nclass Solution(object):\n def search(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n self.target = target\n return self.two_search(arr=nums, left=0, right=len(nums) - 1)\n \n def two_search(self, arr, left, right):\n if left > right:\n return -1\n mid = int((left + right) / 2)\n if arr[mid] == self.target:\n return mid\n if arr[mid] < arr[right]:\n if self.target > arr[mid] and self.target <= arr[right]:\n return self.two_search(arr=arr, left=mid + 1, right=right)\n else:\n return self.two_search(arr=arr, left=left, right=mid - 1)\n else:\n if self.target >= arr[left] and self.target < arr[mid]:\n return self.two_search(arr=arr, left=left, right=mid - 1)\n else:\n return self.two_search(arr=arr, left=mid + 1, right=right)\n\n\n\nif __name__ == '__main__':\n s = Solution()\n t_1 = [1,3]\n t_2 = 0\n print(s.search(nums=t_1, target=t_2))\n \n","sub_path":"Array/medium/33.search-in-rotated-sorted-array.py","file_name":"33.search-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"102846094","text":"# based on (models/tutorials/image/cifar10/)\r\n\r\nimport os\r\nimport re\r\n\r\nimport tensorflow as tf\r\n\r\nimport adience_input\r\n\r\nFLAGS = tf.app.flags.FLAGS\r\n\r\ntf.app.flags.DEFINE_integer('batch_size', 50, \"\"\"Number of images to process in a batch.\"\"\")\r\ntf.app.flags.DEFINE_string('data_dir', './adience_data', \"\"\"Path to the ADIENCE data directory.\"\"\")\r\ntf.app.flags.DEFINE_boolean('use_fp16', False, \"\"\"Train the model using fp16.\"\"\")\r\n\r\nIMAGE_SIZE = adience_input.IMAGE_SIZE\r\nNUM_CLASSES = adience_input.NUM_CLASSES\r\nNUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = adience_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN\r\nNUM_EXAMPLES_PER_EPOCH_FOR_EVAL = adience_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL\r\n\r\nMOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.\r\nNUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.\r\nLEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.\r\nINITIAL_LEARNING_RATE = 0.1 # Initial learning rate.\r\n\r\nTOWER_NAME = 'tower'\r\n\r\ndef _activation_summary(x):\r\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\r\n tf.summary.histogram(tensor_name + '/activations', x)\r\n tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))\r\n\r\ndef _variable_on_cpu(name, shape, initializer):\r\n with tf.device('/cpu:0'):\r\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\r\n var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)\r\n return var\r\n\r\ndef _variable_with_weight_decay(name, shape, stddev, wd):\r\n dtype = tf.float16 if FLAGS.use_fp16 else tf.float32\r\n var = _variable_on_cpu(name, shape,\r\n tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))\r\n\r\n if wd is not None:\r\n weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')\r\n tf.add_to_collection('losses', weight_decay)\r\n\r\n return var\r\n\r\ndef distored_inputs():\r\n if not FLAGS.data_dir:\r\n raise ValueError('Please supply a 
data_dir')\r\n\r\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\r\n images, labels = adience_input.distored_inputs(data_dir=data_dir, batch_size=FLAGS.batch_size)\r\n\r\n if FLAGS.use_fp16:\r\n images = tf.cast(images, tf.float16)\r\n labels = tf.cast(labels, tf.float16)\r\n\r\n return images, labels\r\n\r\ndef inputs(eval_data):\r\n if not FLAGS.data_dir:\r\n raise ValueError('Please supply a data_dir')\r\n\r\n data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')\r\n images, labels = adience_input.inputs(eval_data=eval_data ,data_dir=data_dir, batch_size=FLAGS.batch_size)\r\n\r\n if FLAGS.use_fp16:\r\n images = tf.cast(images, tf.float16)\r\n labels = tf.cast(labels, tf.float16)\r\n\r\n return images, labels\r\n\r\ndef inference(images):\r\n # conv1\r\n with tf.variable_scope('conv1') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[7, 7, 3, 96],\r\n stddev=0.1,\r\n wd=None)\r\n conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [96], tf.constant_initializer(0.1))\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv1 = tf.nn.relu(pre_activation, name=scope.name)\r\n _activation_summary(conv1)\r\n\r\n # pool1\r\n pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool1')\r\n\r\n # norm1\r\n norm1 = tf.nn.local_response_normalization(pool1, depth_radius=5, alpha=0.0001, beta=0.75, name='norm1')\r\n\r\n # conv2\r\n with tf.variable_scope('conv2') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[5, 5, 96, 256],\r\n stddev=0.1,\r\n wd=None)\r\n conv = tf.nn.conv2d(norm1, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [256], tf.constant_initializer(0.1))\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv2 = tf.nn.relu(pre_activation, name=scope.name)\r\n _activation_summary(conv2)\r\n\r\n # pool2\r\n pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool2')\r\n\r\n # norm1\r\n norm2 = tf.nn.local_response_normalization(pool2, depth_radius=5, alpha=0.0001, beta=0.75, name='norm2')\r\n\r\n # conv3\r\n with tf.variable_scope('conv3') as scope:\r\n kernel = _variable_with_weight_decay('weights',\r\n shape=[3, 3, 256, 384],\r\n stddev=0.1,\r\n wd=None)\r\n conv = tf.nn.conv2d(norm2, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = _variable_on_cpu('biases', [384], tf.constant_initializer(0.1))\r\n pre_activation = tf.nn.bias_add(conv, biases)\r\n conv3 = tf.nn.relu(pre_activation, name=scope.name)\r\n _activation_summary(conv3)\r\n\r\n # pool3\r\n pool3 = tf.nn.max_pool(conv3, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],\r\n padding='SAME', name='pool3')\r\n\r\n po = tf.reshape(pool3, [-1, 384*8*8])\r\n\r\n # FC6\r\n with tf.variable_scope('fc6') as scope:\r\n weights = _variable_with_weight_decay('weights', shape=[384*8*8, 512], stddev=0.05, wd=None)\r\n biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.0))\r\n pre_activation = tf.add(tf.matmul(po, weights), biases)\r\n fc6 = tf.nn.relu(pre_activation, name=scope.name)\r\n _activation_summary(fc6)\r\n\r\n # drop6\r\n drop6 = tf.nn.dropout(fc6, keep_prob=0.5)\r\n\r\n # FC7\r\n with tf.variable_scope('fc7') as scope:\r\n weights = _variable_with_weight_decay('weights', shape=[512, 512], stddev=0.05, wd=None)\r\n biases = _variable_on_cpu('biases', [512], tf.constant_initializer(0.0))\r\n pre_activation = tf.add(tf.matmul(drop6, weights), biases)\r\n fc7 = 
tf.nn.relu(pre_activation, name=scope.name)\r\n _activation_summary(fc7)\r\n\r\n # drop7\r\n drop7 = tf.nn.dropout(fc7, keep_prob=0.5)\r\n\r\n # FC8\r\n with tf.variable_scope('fc8') as scope:\r\n weights = _variable_with_weight_decay('weights', shape=[512, 8], stddev=0.05, wd=None)\r\n biases = _variable_on_cpu('biases', [8], tf.constant_initializer(0.0))\r\n pre_activation = tf.add(tf.matmul(drop7, weights), biases)\r\n fc8 = tf.nn.relu(pre_activation, name=scope.name)\r\n _activation_summary(fc8)\r\n\r\n # softmax linear\r\n with tf.variable_scope('softmax_linear') as scope:\r\n weights = _variable_with_weight_decay('weights', [8, 8], stddev=1 / 8.0, wd=None)\r\n biases = _variable_on_cpu('biases', [NUM_CLASSES],\r\n tf.constant_initializer(0.0))\r\n softmax_linear = tf.add(tf.matmul(fc8, weights), biases, name=scope.name)\r\n _activation_summary(softmax_linear)\r\n\r\n return softmax_linear\r\n\r\ndef loss(logits, labels):\r\n labels = tf.cast(labels, tf.int64)\r\n # labels are class indices (hence the int64 cast), so the sparse variant is required\r\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')\r\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\r\n tf.add_to_collection('losses', cross_entropy_mean)\r\n\r\n return tf.add_n(tf.get_collection('losses'), name='total_loss')\r\n\r\ndef _add_loss_summaries(total_loss):\r\n \"\"\"\r\n Args:\r\n total_loss: Total loss from loss().\r\n Returns:\r\n loss_averages_op: op for generating moving averages of losses.\r\n \"\"\"\r\n # Compute the moving average of all individual losses and the total loss.\r\n loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')\r\n losses = tf.get_collection('losses')\r\n loss_averages_op = loss_averages.apply(losses + [total_loss])\r\n\r\n # Attach a scalar summary to all individual losses and the total loss; do the\r\n # same for the averaged version of the losses.\r\n for l in losses + [total_loss]:\r\n # Name each loss as '(raw)' and name the moving average version of the loss\r\n # as the original loss name.\r\n tf.summary.scalar(l.op.name + ' (raw)', l)\r\n tf.summary.scalar(l.op.name, loss_averages.average(l))\r\n\r\n return loss_averages_op\r\n\r\ndef train(total_loss, global_step):\r\n num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size\r\n decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)\r\n\r\n lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE, global_step, decay_steps, LEARNING_RATE_DECAY_FACTOR, staircase=True)\r\n tf.summary.scalar('learning_rate', lr)\r\n\r\n # Generate moving averages of all losses and associated summaries.\r\n loss_averages_op = _add_loss_summaries(total_loss)\r\n\r\n # Compute gradients.\r\n with tf.control_dependencies([loss_averages_op]):\r\n opt = tf.train.GradientDescentOptimizer(lr)\r\n grads = opt.compute_gradients(total_loss)\r\n\r\n # Apply gradients.\r\n apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)\r\n\r\n # Track the moving averages of all trainable variables.\r\n variable_averages = tf.train.ExponentialMovingAverage(\r\n MOVING_AVERAGE_DECAY, global_step)\r\n with tf.control_dependencies([apply_gradient_op]):\r\n variables_averages_op = variable_averages.apply(tf.trainable_variables())\r\n\r\n return variables_averages_op\r\n","sub_path":"adience.py","file_name":"adience.py","file_ext":"py","file_size_in_byte":9499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596027413","text":"#\r\n# Simple example scene for a 2D 
simulation\r\n# Simulation of a buoyant smoke density plume with open boundaries at top & bottom\r\n#\r\nfrom manta import *\r\n\r\n# solver params\r\nres = 64\r\ngs = vec3(res,res,1)\r\ns = Solver(name='main', gridSize = gs, dim=2)\r\ns.timestep = 0.25\r\ntimings = Timings()\r\ndt = 0.25\r\n\r\n# prepare grids\r\nflags = s.create(FlagGrid)\r\nvel = s.create(MACGrid)\r\ndensity = s.create(RealGrid)\r\npressure = s.create(RealGrid)\r\n# Adding new grids\r\n#negDensity=s.create(RealGrid)\r\n#temperature = s.create(RealGrid)\r\n\r\nbWidth=1\r\nalpha = 0.5\r\nflags.initDomain(boundaryWidth=bWidth)\r\nflags.fillGrid()\r\n\r\nsetOpenBound(flags, bWidth,'yY',FlagOutflow|FlagEmpty)\r\n\r\nif (GUI):\r\n\tgui = Gui()\r\n\tgui.show( True )\r\n\tgui.pause()\r\n\r\nsource = s.create(Cylinder, center=gs*vec3(0.5,0.1,0.5), radius=res*0.14, z=gs*vec3(0, 0.02, 0))\r\n\r\n#create an object and apply to flag grids\r\n#obs = Box( parent=s, center=vec3(32,52,0.5),size=vec3(10,2,0.5))\r\n#obs.applyToGrid(grid=flags, value=FlagObstacle)\r\n\r\n#main loop\r\nfor t in range(500):\r\n\tmantaMsg('\\nFrame %i' % (s.frame))\r\n\r\n\tif t<300:\r\n\t\tsource.applyToGrid(grid=density, value=1)\r\n\r\n\tadvectSemiLagrange(flags=flags, vel=vel, grid=density, order=2)\r\n\tadvectSemiLagrange(flags=flags, vel=vel, grid=vel, order=2, openBounds=True, boundaryWidth=bWidth)\r\n\tresetOutflow(flags=flags,real=density)\r\n\r\n\t#diffuseTemperatureExplicit(flags,density,alpha)\r\n\tdiffuseTemperatureImplicit(flags,density, alpha,dt)\r\n\r\n\tsetWallBcs(flags=flags, vel=vel)\r\n\taddBuoyancy(density=density, vel=vel, gravity=vec3(0,-4e-3,0), flags=flags)\r\n\t#negateDensity(density=density,negDensity=negDensity)\r\n\tsolvePressure(flags=flags, vel=vel, pressure=pressure)\r\n\r\n\t#negateDensity(density=density,negDensity=negDensity)\r\n\t\r\n\t#timings.display()\r\n\t# only take the screenshot when the GUI actually exists\r\n\tif (GUI and t == 300):\r\n\t\tgui.screenshot( 'plume_2d_unstable_%04d.png' % t );\r\n\t\r\n\r\n\ts.step()\r\n","sub_path":"Assignment4/plume_2d.py","file_name":"plume_2d.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"369612599","text":"import collections\n\nclass Solution:\n    def lengthOfLongestSubstring(self, s: str) -> int:\n        if not s: return 0\n        l = r = 0\n        res = 0\n        counter = collections.defaultdict(int)\n        while r < len(s):\n            while counter[s[r]] >= 1:\n                counter[s[l]] -= 1\n                l += 1\n            counter[s[r]] += 1\n            res = max(res, r - l + 1)\n            r += 1\n        return res\n","sub_path":"src/solutions/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"641215560","text":"import pandas as pd\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\n\nfrom ...app import *\nfrom ...utils import *\nfrom ... 
import database as db\nfrom ..config.macros import loadMacros\nfrom ..tables import masters as table\n\n\nrepCard = html.Div([\n\n dbc.Row([\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Nome\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Input(id='in-check-masters-name'),\n\n ]), xs=12, sm=12, md=8, lg=8, xl=8),\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Região\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n \n dbc.Select(\n id='dd-check-masters-region',\n options=loadMacros('regiao', addBlank=True), \n )\n\n ]), className='breakColLine', xs=12, sm=12, md=4, lg=4, xl=4),\n \n ], className='breakRowLine'),\n\n dbc.Row([\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Rádio\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Select(\n id='dd-check-masters-model',\n options=loadMacros(\n 'radio_modelo', \n addBlank=True,\n addNull=True\n ), \n )\n\n ]), xs=12, sm=12, md=4, lg=4, xl=4),\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Enlace\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Select(\n id='dd-check-masters-compat',\n options=loadMacros(\n 'radio_compat', \n addBlank=True,\n addNull=True\n ), \n )\n\n ]), className='breakColLine', xs=12, sm=12, md=4, lg=4, xl=4),\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Varredura\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Select(\n id='dd-check-masters-scan',\n options=loadMacros('scan', addBlank=True), \n )\n\n ]), className='breakColLine', xs=12, sm=12, md=4, lg=4, xl=4),\n\n ], className='breakRowLine'),\n\n dbc.Row([\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Busca\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Select(\n id='dd-check-masters-query-mode',\n options=[\n dict(label='Agrupada', value=1),\n dict(label='Separada', value=0),\n ], value=1,\n )\n\n ]), xs=12, sm=12, md=4, lg=4, xl=4),\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Porta Tx\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Input(id='in-check-masters-tx', type='number'),\n\n ]), className='breakColLine', xs=12, sm=12, md=4, lg=4, xl=4),\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"Porta Rx\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Input(id='in-check-masters-rx', type='number'),\n\n ]), className='breakColLine', xs=12, sm=12, md=4, lg=4, xl=4),\n\n ], className='breakRowLine'),\n \n dbc.Row([\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"FE\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Input(id='in-check-masters-fe', type='number'),\n\n ]), xs=12, sm=12, md=4, lg=4, xl=4),\n\n dbc.Col(dbc.InputGroup([\n\n dbc.InputGroupAddon(\n \"CN\", addon_type=\"prepend\", \n className='input-group-prepend-110'\n ),\n\n dbc.Input(id='in-check-masters-cn', type='number'),\n\n ]), className='breakColLine', xs=12, sm=12, md=4, lg=4, xl=4),\n\n ], className='breakRowLine'),\n\n])\n\n\nlayout = html.Div([\n\n html.H4('Consultar > Masters'),\n html.Hr(),\n\n html.Details([\n smrFilter,\n repCard,\n\n # Buttons\n dbc.Button(\n id='check-masters-bt-search',\n n_clicks=0, className='fas fa-search'\n ),\n dbc.Button(\n id='check-masters-bt-clear',\n n_clicks=0, className='fas fa-eraser'\n ),\n ]),\n\n html.Hr(),\n\n # Buttons tooltips\n dbc.Tooltip(\n 'Buscar informações.', \n 
target='check-masters-bt-search'\n ),\n dbc.Tooltip(\n 'Limpar filtros.', \n target='check-masters-bt-clear'\n ), \n\n dcc.Loading(\n table,\n color='#d52b1e',\n type='cube',\n ),\n\n])\n\n\n@app.callback(\n Output('table-masters', 'data'), \n [Input('check-masters-bt-search', 'n_clicks')], \n [State('in-check-masters-name', 'value'),\n State('dd-check-masters-region', 'value'),\n State('dd-check-masters-model', 'value'),\n State('dd-check-masters-compat', 'value'),\n State('dd-check-masters-scan', 'value'),\n State('dd-check-masters-query-mode', 'value'),\n State('in-check-masters-fe', 'value'),\n State('in-check-masters-cn', 'value'),\n State('in-check-masters-tx', 'value'),\n State('in-check-masters-rx', 'value')]\n)\ndef feed_table(n_clicks, repName, repReg, radModel, radCompat, \n dnpScan, queryMode, fe, cn, txPort, rxPort):\n '''Feed masters table based on its filters.'''\n\n if n_clicks == 0:\n raise PreventUpdate\n\n # Fix inputs\n fe = toNone(fe)\n cn = toNone(cn)\n txPort = toNone(txPort)\n rxPort = toNone(rxPort)\n repReg = toNone(repReg)\n dnpScan = toNone(dnpScan)\n repName = toNone(repName)\n radModel = toNone(radModel)\n radCompat = toNone(radCompat)\n queryMode = int(queryMode)\n\n if queryMode == 0:\n df = db.corr.readMasterFull(repName, repReg, dnpScan, radModel, radCompat, txPort, rxPort, fe, cn)\n else:\n df = db.corr.readMasterGroup(repName, repReg, dnpScan, radModel, radCompat, txPort, rxPort, fe, cn)\n\n # Add index to table\n df = df.reset_index()\n # Replace nan to None\n df = df.where((pd.notnull(df)), None)\n # DataFrame to dict\n data = df.to_dict('records')\n\n return data\n\n\n@app.callback(\n [Output('in-check-masters-name', 'value'),\n Output('dd-check-masters-region', 'value'),\n Output('dd-check-masters-model', 'value'),\n Output('dd-check-masters-compat', 'value'),\n Output('dd-check-masters-scan', 'value'),\n Output('in-check-masters-fe', 'value'),\n Output('in-check-masters-cn', 'value'),\n Output('in-check-masters-tx', 'value'),\n Output('in-check-masters-rx', 'value'),\n Output('dd-check-masters-query-mode', 'value')], \n [Input('check-masters-bt-clear', 'n_clicks')]\n)\ndef clearFilters(nc):\n \"\"\"Clear all filled filters.\"\"\"\n preventUpdateNC(nc)\n return (\n '', None, None, None, None,\n '', '', '', '', 1\n )\n \n\n","sub_path":"correlacional/app/layout/check/masters.py","file_name":"masters.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368119410","text":"f = open('input8')\ndata = ''\nfor i in f:\n data += str(i).rstrip()\nf.close()\n\n#data = '0222112222120000'\n\nwidth = 25\nheight = 6\n\nlayers = []\nfor i in range((len(data))//(height * width)): #execute loop once per layer\n templayer = ''\n for j in range(height): #extract contents of each layer from string\n templayer += data[(i*width*height)+width*j:(i*width*height)+width+width*j]\n layers.append(templayer) #one layer per index of list\n\nfinalImage = list(layers[0]) #use list so I can replace chars without creating a whole new string\nfor i in range(len(layers)):\n for j in range(len(layers[0])):\n if finalImage[j] == '2': #replace transparent pixels with appropriate value, going from top to bottom\n finalImage[j] = layers[i][j]\n\nfinalImageString = '' #convert back to string\nfor i in finalImage:\n finalImageString += i\n\nfor i in range(height): #print the string, formatted so it's the proper dimensions, and change chars so it's readable\n print((finalImageString[0 + width*i : 
width + width*i ]).replace('0','_').replace('1','#'))\n\n","sub_path":"D8 Pt 2.py","file_name":"D8 Pt 2.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"309169610","text":"from django.shortcuts import render\nfrom .forms import *\nfrom . import models\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.db.models import Q\nfrom django.views.generic import View\nimport qrcode\n\nfrom .utils import render_to_pdf\n\n# Create your views here.\n\n\ndef employee(request):\n    \"\"\"Employee view\"\"\"\n    context={}\n    if request.method == 'POST':\n        employee_form = Employee(request.POST or None, request.FILES or None)\n        if employee_form.is_valid():\n            #employee_form.save()\n            pass\n        else:\n            return HttpResponse(str(employee_form.errors))\n    else:\n        employee_form = Employee()\n\n    context['employee_form'] = employee_form\n\n    return render(request, 'user/employee.html', context)\n\ndef client(request):\n    \"\"\"Client view\"\"\"\n    context={}\n    if request.method == 'POST':\n        client_form = Client(request.POST or None, request.FILES or None)\n        if client_form.is_valid():\n            #client_form.save()\n            pass\n        else:\n            return HttpResponse(str(client_form.errors))\n    else: \n        client_form = Client()\n    context['client_form'] = client_form\n\n    return render(request, 'user/client.html',context)\n\ndef client_pdf(request):\n    context = {}\n    if request.method == 'POST':\n        client_form = Client(request.POST or None, request.FILES or None)\n        if client_form.is_valid():\n            cid = request.POST.get('client_id')\n            client_name = request.POST.get('client_name')\n            client_form.save()\n\n            dictionary = {\n                'client_name': client_name,\n            }\n\n            qr = qrcode.QRCode(\n                version=5,\n                error_correction=qrcode.constants.ERROR_CORRECT_H,\n                box_size=10,\n                border=4,\n            )\n\n            qr.add_data(dictionary)\n            qr.make(fit=True)\n            img = qr.make_image()\n            img.save(f\"media/qrcode/client_{cid}.jpg\")\n            context['dict'] = dictionary\n            # redirect to the PDF view only after a client was created, so cid is defined\n            return redirect('client_pdf', cid=cid)\n        else:\n            return HttpResponse(str(client_form.errors))\n\n    else:\n        client_form = Client()\n    context['client_form'] = client_form\n    # on GET, show the empty form (reuses the client template)\n    return render(request, 'user/client.html', context)\n\n\n\ndef employee_pdf(request):\n    context = {}\n    if request.method == 'POST':\n        employee_form = Employee(request.POST or None, request.FILES or None)\n        if employee_form.is_valid():\n            eid = request.POST.get('employee_id')\n            employee_name = request.POST.get('employee_name')\n            employee_form.save()\n\n            dictionary = {\n                'employee_name': employee_name,\n            }\n\n            qr = qrcode.QRCode(\n                version=5,\n                error_correction=qrcode.constants.ERROR_CORRECT_H,\n                box_size=10,\n                border=4,\n            )\n            qr.add_data(dictionary)\n            qr.make(fit=True)\n            img = qr.make_image()\n            img.save(f\"media/qrcode/employee_{eid}.jpg\")\n            context['dict'] = dictionary\n\n            # qr_data = employee.objects.filter(employee_id = eid)\n            # redirect to the PDF view only after an employee was created, so eid is defined\n            return redirect('generate_pdf', eid=eid)\n        else:\n            return HttpResponse(str(employee_form.errors))\n    else:\n        employee_form = Employee()\n    context['employee_form'] = employee_form\n\n    # on GET, show the empty form (reuses the employee template)\n    return render(request, 'user/employee.html', context)\n\nclass GeneratePdf(View):\n    def get(self, request, eid, *args, **kwargs):\n        #qr_data = employee.objects.filter(employee_id = 111)\n        qr_data = models.employee.objects.filter(employee_id = eid)\n\n        data = {\n            'employee_name':qr_data[0].employee_name,\n            'employee_id':qr_data[0].employee_id,\n        }\n\n        pdf = render_to_pdf('user/generate_pdf.html', data)\n        return HttpResponse(pdf, content_type='application/pdf')\n\nclass ClientPdf(View):\n    def get(self, 
request, cid, *args, **kwargs):\n\n        qr_data = models.client.objects.filter(client_id = cid)\n\n        data = {\n            'client_name': qr_data[0].client_name,\n            'client_id': qr_data[0].client_id,\n        }\n\n        pdf = render_to_pdf('user/client_pdf.html', data)\n        return HttpResponse(pdf, content_type='application/pdf')","sub_path":"app/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"342639983","text":"import sys,collections as cl,bisect as bs\nsys.setrecursionlimit(100000)\nmod = 10**9+7\nMax = sys.maxsize\ndef l(): # read a list of ints\n    return list(map(int,input().split()))\ndef m(): # read multiple ints\n    return map(int,input().split())\ndef onem(): # read a single int such as N\n    return int(input())\ndef s(x): # run-length compression\n    a = []\n    aa = x[0]\n    su = 1\n    for i in range(len(x)-1):\n        if aa != x[i+1]:\n            a.append([aa,su])\n            aa = x[i+1]\n            su = 1\n        else:\n            su += 1\n    a.append([aa,su])\n    return a\ndef jo(x): # join a list with spaces\n    return \" \".join(map(str,x))\ndef max2(x): # can be written the same way for other cases\n    return max(map(max,x))\ndef In(x,a): # a is a sorted list\n    k = bs.bisect_left(a,x)\n    if k != len(a) and a[k] == x:\n        return True\n    else:\n        return False\n\"\"\"\ndef nibu(x,n,r):\n    ll = 0\n    rr = r\n    while True:\n        mid = (ll+rr)//2\n\n        if rr == mid:\n            return ll\n        if (insert the condition here):\n            rr = mid\n        else:\n            ll = mid+1\n\"\"\"\n\nN,K = m()\n\na = l()\n\n\naaa = [0 for i in range(N+1)]\naaa[1] = a[0] % K\nfor i in range(1,N):\n    aaa[i+1] = aaa[i] + a[i]\n\ncount = 0\nddd = dict()\nddd[0] = 1\nif K == 1:\n    print(0)\nelse:\n    for i in range(1,N+1):\n        ppp = (aaa[i] - i)%K\n        count += ddd.get(ppp,0)\n        ddd[ppp] = ddd.get(ppp,0) + 1\n        if i - K + 1 >= 0:\n            ddd[(aaa[i-K+1] - i + K - 1)%K] -= 1\n    print(count)\n\n\n\n\n\n\n\n\n","sub_path":"Python_codes/p02851/s664522045.py","file_name":"s664522045.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"653422545","text":"#!/usr/bin/env python3\nclass Factorial:\n    def __init__(self, n, mod):\n        self.f = f =[0] * (n + 1)\n        f[0] = b = 1\n        for i in range(1, n + 1):f[i] = b = b * i % mod\n        self.inv = inv = [0] * (n + 1)\n        inv[n] = b = pow(self.f[n], -1, mod)\n        for i in range(n,0,-1):inv[i-1] = b = b * i % mod\n        self.mod = mod\n    def factorial(self, i):\n        return self.f[i]\n    def ifactorial(self, i):\n        return self.inv[i]\n    def comb(self, n, k):\n        if n >= k:return self.f[n] * self.inv[n - k] * self.inv[k] % self.mod\n        else:return 0\n\nMOD = 998244353\nnCr = Factorial(5*10**5, MOD).comb\nN, M, k = map(int, input().split())\nprint(sum(nCr(N - 1, x) * M * pow(M - 1, N - x - 1, MOD) % MOD for x in range(-~k)) % MOD)","sub_path":"Python_codes/p02685/s890269370.py","file_name":"s890269370.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"382292","text":"import Lane_Detection as ld\nimport cv2\nimport time\nimport serial\n\nclass AICar:\n    curr_steering_angle = 90\n    ser = None\n    received = None\n    \n    def __init__(self):\n        self.ser = serial.Serial(port='/dev/ttyUSB0', baudrate=115200, timeout=0.001)\n        self.received = []\n        self.ser.write(b'begin\\n')\n        time.sleep(3)\n        \n        print(\"Car object created.\")\n    \n    def startAIDriving(self):\n        detector = ld.LaneDetector(320, 240)\n        \n        self.ser.write(str.encode(str('1 ')))\n        time.sleep(0.03)\n        \n        print(\"AI driving started...\")\n        detector.drive(self.ser)\n        \n        while self.ser.inWaiting() > 0:\n            line = self.ser.readline()\n            if 
line:\n self.received.append(line.decode().strip())\n \n print(\"AI driving stopped.\")\n \n def startManualDriving(self):\n input(\"Not implemented. Press any key to exit.\")\n ","sub_path":"src/pi/l3_DomainLayer.py","file_name":"l3_DomainLayer.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496433146","text":"from django.utils.deconstruct import deconstructible\nfrom django.conf import settings\nfrom uuid import uuid4\nimport os, errno\n\n\n@deconstructible\nclass UploadToPathAndRename(object):\n def __init__(self, path):\n self.sub_path = path\n\n def __call__(self, instance, filename):\n ext = filename.split('.')[-1]\n local_path = os.path.join(settings.MEDIA_ROOT, self.sub_path)\n\n try:\n os.makedirs(local_path)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n filename = '{}.{}'.format(uuid4().hex, ext)\n return os.path.join(self.sub_path, filename)\n","sub_path":"arcutils/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"66205146","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport matplotlib.patches as mpatches\nimport argparse\nimport os\n\ndef plot2Stereotypes(group, args):\n if group == args.g1:\n _, _, data, _, app, _ = getData(args)\n else:\n _, _, _, data, _, app = getData(args)\n key1, key2 = list(data.keys())[0], list(data.keys())[1]\n data1 = data[key1]\n data2 = data[key2]\n x = np.arange(len(data1) + len(data2))\n width = 0.35\n\n fig, ax = plt.subplots()\n bars = ax.bar(x, list(data1.values()) + list(data2.values()), width)\n ax.set_title(group + \" Score\")\n ax.set_ylabel(\"Score\")\n ax.set_xticks(x)\n ax.set_xticklabels(list(data1.keys()) + list(data2.keys()))\n fig.legend(handles=[mpatches.Patch(color='red', label=key1), mpatches.Patch(color='blue', label=key2)])\n for i in range(len(data1)):\n ax.get_children()[i].set_color('r')\n for i in range(len(data1), len(data1) + len(data2)):\n ax.get_children()[i].set_color('b')\n fig.tight_layout()\n fig.savefig(\"graphs/%s/%s_score.png\" %(args.folder, group))\n\ndef plotStereotypeAveraged(group, args):\n if group == args.g1:\n _, _, data, _, app, _ = getData(args)\n else:\n _, _, _, data, _, app = getData(args)\n\n key1, key2 = list(data.keys())[0], list(data.keys())[1]\n\n data1 = data[key1]\n data2 = data[key2]\n\n average1 = {}\n average2 = {}\n\n for key in data1:\n try:\n average1[key] = data1[key] / app[key]\n except ZeroDivisionError:\n average1[key] = 0\n for key in data2:\n try:\n average2[key] = data2[key] / app[key]\n except ZeroDivisionError:\n average2[key] = 0\n\n average1 = sortDict(average1)\n average2 = sortDict(average2)\n x = np.arange(len(average1) + len(average2))\n width = 0.35\n\n fig, ax = plt.subplots()\n bars = ax.bar(x, list(average1.values()) + list(average2.values()), width)\n ax.set_title(group + \" Score (Averaged)\")\n ax.set_ylabel(\"Score\")\n ax.set_xticks(x)\n ax.set_xticklabels(list(average1.keys()) + list(average2.keys()))\n fig.legend(handles=[mpatches.Patch(color='red', label=key1), mpatches.Patch(color='blue', label=key2)])\n for i in range(len(average1)):\n ax.get_children()[i].set_color('r')\n for i in range(len(average1), len(average1) + len(average2)):\n ax.get_children()[i].set_color('b')\n fig.tight_layout()\n fig.savefig(\"graphs/%s/%s_score_average.png\" %(args.folder, group))\n\ndef 
plotWordProbabilities(args, group):\n    if group == args.g1:\n        _, _, data, _, app, _ = getData(args)\n    else:\n        _, _, _, data, _, app = getData(args)\n\n    key1, key2 = list(data.keys())[0], list(data.keys())[1]\n    data1 = data[key1]\n    data2 = data[key2]\n\n    sum = 0\n    for key in data1:\n        try:\n            data1[key] = data1[key] / app[key]\n        except ZeroDivisionError:\n            data1[key] = 0\n        sum += data1[key]\n    for key in data2:\n        try:\n            data2[key] = data2[key] / app[key]\n        except ZeroDivisionError:\n            data2[key] = 0\n        sum += data2[key]\n\n    for key in data1:\n        data1[key] /= sum\n    for key in data2:\n        data2[key] /= sum # probabilities p_tgt/p_prior\n\n    data1 = sortDict(data1)\n    data2 = sortDict(data2)\n\n\n    x = np.arange(len(data1) + len(data2))\n    width = 0.35\n\n    fig, ax = plt.subplots()\n    bars = ax.bar(x, list(data1.values()) + list(data2.values()), width)\n    ax.set_title(group + \" Probabilities\")\n    ax.set_ylabel(\"Probability\")\n    ax.set_xticks(x)\n    ax.set_xticklabels(list(data1.keys()) + list(data2.keys()))\n    fig.legend(handles=[mpatches.Patch(color='red', label=key1), mpatches.Patch(color='blue', label=key2)])\n    for i in range(len(data1)):\n        ax.get_children()[i].set_color('r')\n    for i in range(len(data1), len(data1) + len(data2)):\n        ax.get_children()[i].set_color('b')\n    fig.tight_layout()\n    fig.savefig(\"graphs/%s/%s_probabilities.png\" %(args.folder, group))\n\ndef plotWordProbabilitiesSBS(args):\n    _, _, data1, data2, app1, app2 = getData(args)\n    key1, key2 = list(data1.keys())[0], list(data1.keys())[1]\n    x1 = np.arange(len(data1[key1]))\n    x2 = np.arange(len(data1[key2]))\n    width = 0.35\n    fig, ax = plt.subplots(1, 2, sharey=True)\n\n    l1 = []\n    l2 = []\n    l3 = []\n    l4 = []\n\n    sum1 = 0\n    sum2 = 0\n    for key in data1[key1]:\n        try:\n            data1[key1][key] = data1[key1][key] / app1[key]\n        except ZeroDivisionError:\n            data1[key1][key] = 0\n        try:\n            data2[key1][key] = data2[key1][key] / app2[key]\n        except ZeroDivisionError:\n            data2[key1][key] = 0\n        sum1 += data1[key1][key]\n        sum2 += data2[key1][key]\n\n    for key in data1[key2]:\n        try:\n            data1[key2][key] = data1[key2][key] / app1[key]\n        except ZeroDivisionError:\n            data1[key2][key] = 0\n        try:\n            data2[key2][key] = data2[key2][key] / app2[key]\n        except ZeroDivisionError:\n            data2[key2][key] = 0\n        sum1 += data1[key2][key]\n        sum2 += data2[key2][key]\n\n    for key in data1[key1]:\n        data1[key1][key] /= sum1\n        data2[key1][key] /= sum2\n    for key in data1[key2]:\n        data1[key2][key] /= sum1\n        data2[key2][key] /= sum2\n\n    data1[key1] = sortDict(data1[key1])\n    data1[key2] = sortDict(data1[key2])\n\n    for key in data1[key1]:\n        l1.append(data1[key1][key])\n        l2.append(data2[key1][key])\n\n    for key in data1[key2]:\n        l3.append(data1[key2][key])\n        l4.append(data2[key2][key])\n\n    rects1 = ax[0].bar(x1 - width/2, l1, width, label=args.g1)\n    rects2 = ax[0].bar(x1 + width/2, l2, width, label=args.g2)\n\n    rects3 = ax[1].bar(x2 - width/2, l3, width, label=args.g1)\n    rects4 = ax[1].bar(x2 + width/2, l4, width, label=args.g2)\n\n\n    label1 = []\n    for i, word in enumerate(data1[key1]):\n        if i % args.skip == 0:\n            label1.append(word)\n        else:\n            label1.append('')\n\n    label2 = []\n    for i, word in enumerate(data1[key2]):\n        if i % args.skip == 0:\n            label2.append(word)\n        else:\n            label2.append('')\n    ax[0].set_ylabel('Probability', fontsize=20)\n    ax[0].set_title(key1, fontsize=20)\n    ax[0].set_xticks(x1)\n    ax[0].set_xticklabels(label1, rotation=45, fontsize=18, horizontalalignment='right')\n    ax[0].legend(prop={'size':24})\n    ax[1].set_title(key2, fontsize=20)\n    ax[1].set_xticks(x2)\n    ax[1].set_xticklabels(label2, 
rotation=45, fontsize=18, horizontalalignment='right')\n ax[1].legend(prop={'size':24})\n\n fig.tight_layout()\n fig.savefig(\"graphs/%s/%s_vs_%s_prob_SBS.png\" %(args.folder, args.g1, args.g2))\n\n\ndef plotSideBySide(args):\n _, _, data1, data2, _, _ = getData(args)\n key1, key2 = list(data1.keys())[0], list(data1.keys())[1]\n x1 = np.arange(len(data1[key1]))\n x2 = np.arange(len(data1[key2]))\n width = 0.35\n fig, ax = plt.subplots(1, 2, sharey=True)\n\n l1 = []\n l2 = []\n l3 = []\n l4 = []\n\n for key in data1[key1]:\n l1.append(data1[key1][key])\n l2.append(data2[key1][key])\n\n for key in data1[key2]:\n l3.append(data1[key2][key])\n l4.append(data2[key2][key])\n\n rects1 = ax[0].bar(x1 - width/2, l1, width, label=args.g1)\n rects2 = ax[0].bar(x1 + width/2, l2, width, label=args.g2)\n\n rects3 = ax[1].bar(x2 - width/2, l3, width, label=args.g1)\n rects4 = ax[1].bar(x2 + width/2, l4, width, label=args.g2)\n\n ax[0].set_ylabel('Score')\n ax[0].set_title(key1)\n ax[0].set_xticks(x1)\n ax[0].set_xticklabels(list(data1[key1].keys()))\n ax[0].legend()\n ax[1].set_title(key2)\n ax[1].set_xticks(x2)\n ax[1].set_xticklabels(list(data1[key2].keys()))\n ax[1].legend()\n\n fig.tight_layout()\n fig.savefig(\"graphs/%s/%s_vs_%s_score_SBS.png\" %(args.folder, args.g1, args.g2))\n\ndef plotNormSideBySide(args):\n _, _, data1, data2, _, _ = getData(args)\n key1, key2 = list(data1.keys())[0], list(data1.keys())[1]\n x = np.arange(len(data1[key1]) + len(data1[key2]))\n width = 0.35\n fig, ax = plt.subplots(1)\n\n word1_score = {}\n word2_score = {}\n word1_sum = 0\n word2_sum = 0\n for key in data1[key1]:\n word1_score[key] = data1[key1][key]\n word1_sum += data1[key1][key]\n word2_score[key] = data2[key1][key]\n word2_sum += data2[key1][key]\n for key in data1[key2]:\n word1_score[key] = data1[key2][key]\n word1_sum += data1[key2][key]\n word2_score[key] = data2[key2][key]\n word2_sum += data2[key2][key]\n\n for key in word1_score:\n word1_score[key] /= word1_sum\n word2_score[key] /= word2_sum\n\n rects1 = ax.bar(x - width/2, list(word1_score.values()), width, label=args.g1)\n rects2 = ax.bar(x + width/2, list(word2_score.values()), width, label=args.g2)\n\n ax.set_ylabel('Score')\n ax.set_title(\"Score Normalized Over Groups\")\n ax.set_xticks(x)\n ax.set_xticklabels(list(word1_score.keys()))\n ax.legend()\n\n fig.tight_layout()\n fig.savefig(\"graphs/%s/%s_vs_%s_score_norm_SBS.png\" %(args.folder, args.g1, args.g2))\n\ndef plotNormPerBar(args):\n _, _, data1, data2, _, _ = getData(args)\n key1, key2 = list(data1.keys())[0], list(data1.keys())[1]\n x = np.arange(len(data1[key1]) + len(data1[key2]))\n width = 0.35\n fig, ax = plt.subplots(1)\n\n word1_score = {}\n word2_score = {}\n for key in data1[key1]:\n word1_score[key] = data1[key1][key]\n word2_score[key] = data2[key1][key]\n for key in data1[key2]:\n word1_score[key] = data1[key2][key]\n word2_score[key] = data2[key2][key]\n\n for key in word1_score:\n try:\n word1_score[key] /= (word1_score[key] + word2_score[key])\n except ZeroDivisionError:\n word1_score[key] = 0\n if word1_score[key] != 0:\n word2_score[key] = 1 - word1_score[key]\n else:\n word2_score[key] = 0\n\n rects1 = ax.bar(x - width/2, list(word1_score.values()), width, label=args.g1)\n rects2 = ax.bar(x + width/2, list(word2_score.values()), width, label=args.g2)\n\n ax.set_ylabel('Score')\n ax.set_title(\"Normalized Scores Over Bars\")\n ax.set_xticks(x)\n ax.set_xticklabels(list(word1_score.keys()))\n ax.legend()\n\n fig.tight_layout()\n 
fig.savefig(\"graphs/%s/%s_vs_%s_score_npb.png\" %(args.folder, args.g1, args.g2))\n\ndef plotNormOcc(args):\n _, _, data1, data2, app1, app2 = getData(args)\n x = np.arange(len(app1))\n width = 0.35\n fig, ax = plt.subplots(1)\n data1 = data1[list(data1.keys())[0]]\n data2 = data2[list(data2.keys())[1]]\n\n d1 = {}\n d2 = {}\n for key in data1:\n d1[key] = app1[key]\n d2[key] = app2[key]\n for key in data2:\n d1[key] = app1[key]\n d2[key] = app2[key]\n\n sum1 = 0\n sum2 = 0\n for key in d1:\n sum1 += d1[key]\n sum2 += d2[key]\n\n for key in d1:\n d1[key] /= sum1\n d2[key] /= sum2\n\n rects1 = ax.bar(x - width/2, list(d1.values()), width, label=args.g1)\n rects2 = ax.bar(x + width/2, list(d2.values()), width, label=args.g2)\n\n ax.set_ylabel('Score')\n ax.set_title(\"occ_norm\")\n ax.set_xticks(x)\n ax.set_xticklabels(list(d1.keys()))\n ax.legend()\n\n fig.tight_layout()\n fig.savefig(\"graphs/%s/%s_vs_%s_occurence_norm.png\" % (args.folder, args.g1, args.g2))\n\ndef plotAvgSBS(args):\n _, _, data1, data2, app1, app2 = getData(args)\n keys = list(data1.keys())\n key1 = keys[0]\n key2 = keys[1]\n x1 = np.arange(len(data1[key1]))\n x2 = np.arange(len(data1[key2]))\n width = 0.35\n fig, ax = plt.subplots(1, 2, sharey=True)\n\n\n for key in data1[key1]:\n try:\n data1[key1][key] = data1[key1][key] / app1[key]\n except ZeroDivisionError:\n data1[key1][key] = 0\n try:\n data2[key1][key] = data2[key1][key] / app2[key]\n except ZeroDivisionError:\n data2[key1][key] = 0\n for key in data1[key2]:\n try:\n data1[key2][key] = data1[key2][key] / app1[key]\n except ZeroDivisionError:\n data1[key2][key] = 0\n try:\n data2[key2][key] = data2[key2][key] / app2[key]\n except ZeroDivisionError:\n data2[key2][key] = 0\n\n data1[key1] = sortDict(data1[key1])\n data1[key2] = sortDict(data1[key2])\n\n l1 = []\n l2 = []\n l3 = []\n l4 = []\n\n for key in data1[key1]:\n l1.append(data1[key1][key])\n l2.append(data2[key1][key])\n\n for key in data1[key2]:\n l3.append(data1[key2][key])\n l4.append(data2[key2][key])\n\n rects1 = ax[0].bar(x1 - width/2, l1, width, label=args.g1)\n rects2 = ax[0].bar(x1 + width/2, l2, width, label=args.g2)\n\n rects3 = ax[1].bar(x2 - width/2, l3, width, label=args.g1)\n rects4 = ax[1].bar(x2 + width/2, l4, width, label=args.g2)\n\n ax[0].set_ylabel('Score')\n ax[0].set_title(key1 + \"(avg)\")\n ax[0].set_xticks(x1)\n ax[0].set_xticklabels(list(data1[key1].keys()))\n ax[0].legend()\n ax[1].set_title(key2 + \"(avg)\")\n ax[1].set_xticks(x2)\n ax[1].set_xticklabels(list(data1[key2].keys()))\n ax[1].legend()\n\n fig.tight_layout()\n fig.savefig(\"graphs/%s/%s_vs_%s_average_score_SBS.png\" % (args.folder, key1, key2))\n\ndef sortDict(d):\n d = {k: v for k, v in sorted(d.items(), key=lambda item: item[1], reverse=True)}\n return d\n\ndef getData(args):\n f1 = open(args.f1, 'r')\n j = json.load(f1)\n data1 = j[args.ste]\n app1 = j[args.ste][\"appearances\"]\n f1.close()\n\n f2 = open(args.f2, 'r')\n i = json.load(f2)\n data2 = i[args.ste]\n app2 = i[args.ste][\"appearances\"]\n f2.close()\n\n if args.restrict:\n include = [\"ineffective\", \"passive\", \"incompetent\", \"unproductive\", \"ineffective\", \"unambitious\", \"passive\", \"indecisive\", \"weak\", \"gentle\", \"timid\",\"unassertive\"]\n d1 = {}\n d2 = {}\n for pkey in data1:\n d1[pkey] = {}\n for key in data1[pkey]:\n if key in include:\n d1[pkey][key] = data1[pkey][key]\n for pkey in data2:\n d2[pkey] = {}\n for key in data2[pkey]:\n if key in include:\n d2[pkey][key] = data2[pkey][key]\n else:\n d1 = data1\n d2 = data2\n\n 
return j, i, d1, d2, app1, app2\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--f1', default='logs/she_scores.json', type=str)\n    parser.add_argument('--f2', default='logs/he_scores.json', type=str)\n    parser.add_argument('--ste', default=\"occupation\", type=str)\n    parser.add_argument('--folder', default=\"basic\", type=str)\n    parser.add_argument('--g1', default=\"she\", type=str)\n    parser.add_argument('--g2', default=\"he\", type=str)\n    parser.add_argument('--restrict', default=False, type=bool)\n    parser.add_argument('--skip', default=1, type=int)\n    args = parser.parse_args()\n    print(args)\n\n    if not os.path.isdir(\"graphs/%s\" %args.folder):\n        os.mkdir(\"graphs/%s\" %args.folder)\n\n    # plot2Stereotypes(args.g1, args)\n    plotStereotypeAveraged(args.g1, args)\n    #\n    # plot2Stereotypes(args.g2, args)\n    plotStereotypeAveraged(args.g2, args)\n    #\n    # plotSideBySide(args)\n    # plotNormSideBySide(args)\n    # plotNormPerBar(args)\n    # plotNormOcc(args)\n    plotAvgSBS(args)\n    # plotWordProbabilities(args, args.g1)\n    # plotWordProbabilities(args, args.g2)\n    plotWordProbabilitiesSBS(args)\n    plt.show()\n","sub_path":"histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":15461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"470585944","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom urllib import request\nimport os\n\nclass MeisPipeline(object):\n    def __init__(self):\n        self.path = os.path.join(os.path.dirname(__file__), \"image\")\n        if not os.path.exists(self.path):\n            os.mkdir(self.path)\n\n    def process_item(self, item, spider):\n        category = item[\"category\"]\n        url = item[\"img_url\"]\n        category_path = os.path.join(self.path, category)# error\n        if not os.path.exists(category_path):\n            os.mkdir(category_path)\n        for url_ in url:\n            name = url_[-10:]\n            request.urlretrieve(url_,os.path.join(category_path,name))\n        return item\n# class MeisPipeline(Imagepipeline):","sub_path":"scrapy/Meis/Meis/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"441700798","text":"from flask import Blueprint, redirect, url_for, render_template, flash\nfrom flaskr.app import db\nfrom flaskr.utils import set_next\nfrom .forms import NewEntryForm\nfrom .models import Entry\nfrom flask_login import login_required, current_user\n\n\nfrontend = Blueprint('frontend', __name__)\n\n\n@frontend.route('/')\n@set_next\ndef show_entries():\n    form = NewEntryForm()\n    return render_template('frontend/show_entries.html', entries=Entry.query.all(), form=form)\n\n\n@frontend.route('/add', methods=['POST'])\n@login_required\ndef add_entry():\n    form = NewEntryForm()\n    if form.validate_on_submit():\n        new_post = Entry(title=form.title.data, entry=form.entry.data, user=current_user)\n        db.session.add(new_post)\n        db.session.commit()\n        flash('New entry was successfully posted', 'success')\n        return redirect(url_for('frontend.show_entries'))\n    flash('New entry was NOT added', 'error')\n    return render_template('frontend/show_entries.html', entries=Entry.query.all(), form=form)\n\n\n@frontend.route('/delete/<int:post_id>')\n@login_required\ndef delete_entry(post_id):\n    entry = Entry.query.filter_by(id=post_id).first_or_404()\n    db.session.delete(entry)\n    db.session.commit()\n    
return redirect(url_for('frontend.show_entries'))\n\n\n@frontend.route('/post/<int:post_id>')\n@set_next\ndef show_post(post_id):\n    entry = Entry.query.get(post_id)\n    if current_user.is_authenticated:\n        return render_template('frontend/post_detail_authorized.html', entry=entry)\n    else:\n        return render_template('frontend/post_detail_unauthorized.html', entry=entry)\n\n","sub_path":"flaskr/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"388789358","text":"#!/usr/bin/env python3\n\nfrom itertools import combinations\nfrom typing import List, Set\n\nimport aoc\nfrom more_itertools import pairwise\n\n\ndef main() -> None:\n    numbers = aoc.get_integers(9)\n\n    stack: List[int] = []\n    for index, (appending_number, number) in enumerate(pairwise(numbers)):\n        stack.append(appending_number)\n        if len(stack) < 25:\n            continue\n        if len(stack) > 25:\n            stack.pop(0)\n\n        if any(a + b == number for a, b in combinations(stack, 2)):\n            continue\n\n        print(f\"No combination found for {number} (#{index}).\")\n\n    target = number\n\n    for length in range(2, len(numbers)):\n        for start_index in range(len(numbers) - length + 1):\n            segment = numbers[start_index : start_index + length]\n            if sum(segment) == target:\n                print(\n                    f\"Found range {segment} \"\n                    f\"at [{start_index} : {start_index + length}) \"\n                    f\"length ({length}).\"\n                )\n                weakness = min(segment) + max(segment)\n                print(f\"The weakness is {weakness}.\")\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"585611053","text":"import os\n\nimport numpy as np\nimport pandas as pd\n\nfrom .data_preprocess import Preprocessor\nfrom DeepLearning.keras_models import kmodel\nfrom .plots import Plotter\n\n\n# Train Test Validate\nclass ModelTrainer:\n    \"\"\"\n    This class object can be initialized using three files i.e. train, test and validation CSV\n    and the number of features (int) per sample\n    Call the run_cnn_model or run_mlp_model method to train the model using the data in the train.tsv and val data in the val.tsv\n    then this method itself calls plot_accuracy_and_loss method of Plotter class to create plots\n    after that test dataset will be used to predict results. 
finally prediction results will be\n laid out in the out.txt\n \"\"\"\n\n def __init__(self, train_tsv, test_tsv, val_tsv, nfeatures, labels_file, mname, outdir=\"./\"):\n self.train_tsv = train_tsv\n self.test_tsv = test_tsv\n self.val_tsv = val_tsv\n self.nfeatures = nfeatures\n self.labels_file = labels_file\n self.nclasses = self.get_number_of_classes()\n self.mname = mname\n self.outdir = outdir\n\n def get_number_of_classes(self):\n return len([c for c in open(self.labels_file, 'r')])\n\n def run_cnn_model(self, e=20, test=False):\n self.predict_txt = self.mname + \"_1D_CNN_predict.txt\"\n self.result_log = os.path.join(self.outdir, self.mname + \"_1D_CNN.log\")\n\n # get the training data\n Ptrain = Preprocessor(input_files=[self.train_tsv], nfeatures=self.nfeatures, labels_file=self.labels_file)\n Pval = Preprocessor(input_files=[self.val_tsv], nfeatures=self.nfeatures, labels_file=self.labels_file)\n\n train_exp, train_lab = Ptrain.get_cnn_data()\n val_exp, val_lab = Pval.get_cnn_data()\n\n print(val_exp.shape, train_exp.shape)\n\n # initialize model\n cnn_model = kmodel(self.nfeatures, self.nclasses).get_1D_cnn_model()\n\n # train the model\n train_results = cnn_model.fit(train_exp, train_lab, epochs=e, validation_data=(val_exp, val_lab))\n\n pd.DataFrame(train_results.history).to_csv(self.result_log, sep='\\t')\n\n MyPlotter = Plotter(outimg=self.mname + \" 1D CNN\", outdir=self.outdir)\n MyPlotter.plot_accuracy_and_loss(train_results)\n\n if test:\n # get the test data\n Ptest = Preprocessor([self.test_tsv], self.nfeatures, labels_file=self.labels_file)\n test_exp, test_lab = Ptest.get_cnn_data()\n ypred = cnn_model.predict(test_exp)\n self.print_ypred_test_labels(ypred, test_lab)\n print(\"Results dir: \", self.outdir)\n\n return cnn_model, self.get_model_accuracy_and_loss(train_results)\n\n def run_mlp_model(self, e=20, test=False):\n self.predict_txt = self.mname + \"_MLP_predict.txt\"\n self.result_log = os.path.join(self.outdir,self.mname + \"_MLP.log\")\n\n # get the training data\n Ptrain = Preprocessor(input_files=[self.train_tsv], nfeatures=self.nfeatures, labels_file=self.labels_file)\n Pval = Preprocessor(input_files=[self.val_tsv], nfeatures=self.nfeatures, labels_file=self.labels_file)\n\n train_exp, train_lab = Ptrain.get_mlp_data()\n val_exp, val_lab = Pval.get_mlp_data()\n\n # initialize model\n mlp_model = kmodel(self.nfeatures, self.nclasses).get_mlp_model()\n\n # train the model\n train_results = mlp_model.fit(train_exp, train_lab, epochs=e, validation_data=(val_exp, val_lab))\n\n pd.DataFrame(train_results.history).to_csv(self.result_log, sep='\\t')\n\n MyPlotter = Plotter(outimg=self.mname + \" MLP\", outdir=self.outdir)\n MyPlotter.plot_accuracy_and_loss(train_results)\n\n if test:\n # get the test data\n Ptest = Preprocessor([self.test_tsv], self.nfeatures, labels_file=self.labels_file)\n test_exp, test_lab = Ptest.get_mlp_data()\n ypred = mlp_model.predict(test_exp)\n self.print_ypred_test_labels(ypred, test_lab)\n print(\"Results dir: \", self.outdir)\n\n return mlp_model, self.get_model_accuracy_and_loss(train_results)\n\n def get_model_accuracy_and_loss(self, fit_results):\n res = {'accuracy': fit_results.history['accuracy'][-1],\n 'val_accuracy': fit_results.history['val_accuracy'][-1],\n 'loss': fit_results.history['loss'][-1],\n 'val_loss': fit_results.history['val_loss'][-1]}\n\n return res\n\n def print_ypred_test_labels(self, ypred, labels):\n ypred_argmax = np.argmax(ypred, 1)\n lab_argmax = np.argmax(labels, 1)\n outfile = 
os.path.join(self.outdir, self.predict_txt)\n\n ylen = ypred.shape[0]\n yclasses = ypred.shape[1]\n\n with open(outfile, 'w+') as out:\n for rec in range(ylen):\n for i in range(yclasses):\n out.write(str(ypred[rec, i]))\n out.write('\\t')\n out.write(str(labels[rec, i]))\n out.write('\\t')\n\n out.write('\\t')\n out.write(str(ypred_argmax[rec]))\n out.write('\\t')\n out.write(str(lab_argmax[rec]))\n out.write('\\t')\n out.write(str(ypred_argmax[rec] == lab_argmax[rec]))\n out.write('\\n')\n\n def print_model_training_progress(self, res):\n\n df = pd.DataFrame(res)\n df.to_csv('training_progress.tsv', sep='\\t')\n","sub_path":"DeepLearning/train_keras_models.py","file_name":"train_keras_models.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"304417442","text":"# Copyright 2018 ETH Zurich\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ipaddress\n\nfrom django import urls\nfrom django.core.exceptions import ValidationError\nfrom django.db import models, transaction\nfrom django.utils.html import format_html\n\nimport scionlab.tasks\nfrom scionlab.models.core import (\n AS,\n Interface,\n Link,\n BorderRouter,\n)\nfrom scionlab.models.vpn import VPNClient\nfrom scionlab.defines import (\n USER_AS_ID_BEGIN,\n USER_AS_ID_END,\n)\nfrom scionlab.util import as_ids\n\n_MAX_LEN_CHOICES_DEFAULT = 16\n_VAGRANT_VM_LOCAL_IP = '10.0.2.15'\n\n\nclass UserASManager(models.Manager):\n def create(self,\n owner,\n attachment_point,\n public_port,\n installation_type,\n as_id=None,\n label=None,\n use_vpn=False,\n public_ip=None,\n bind_ip=None,\n bind_port=None,\n vpn_client_ip=None):\n \"\"\"\n Create a UserAS attached to the given attachment point.\n\n :param User owner: owner of this UserAS\n :param AttachmentPoint attachment_point: the attachment point (AP) to connect to\n :param int public_port: the public port for the connection to the AP\n :param str as_id: optional as_id, if None is given, the next free ID is chosen\n :param str label: optional label\n :param bool use_vpn: use VPN for the connection to the AP\n :param str public_ip: the public IP for the connection to the AP.\n Must be specified if use_vpn is not enabled.\n :param str bind_ip: the bind IP for the connection to the AP (for NAT)\n :param str bind_port: the bind port for the connection to the AP (for NAT port remapping)\n :param str vpn_client_ip: the specific IP address to assign to the VPN client (if vpn).\n A free one if not specified, and ignored if use_vpn == False.\n :returns: UserAS\n \"\"\"\n owner.check_as_quota()\n\n isd = attachment_point.AS.isd\n if as_id:\n as_id_int = as_ids.parse(as_id)\n else:\n as_id_int = self.get_next_id()\n as_id = as_ids.format(as_id_int)\n\n user_as = UserAS(\n owner=owner,\n label=label,\n isd=isd,\n as_id=as_id,\n as_id_int=as_id_int,\n attachment_point=attachment_point,\n public_ip=public_ip,\n bind_ip=bind_ip,\n bind_port=bind_port,\n installation_type=installation_type,\n )\n\n 
user_as.init_keys()\n user_as.generate_certificate_chain()\n user_as.save()\n user_as.init_default_services(\n public_ip=public_ip,\n bind_ip=_VAGRANT_VM_LOCAL_IP if installation_type == UserAS.VM else bind_ip,\n )\n\n host = user_as.hosts.get()\n if use_vpn:\n vpn_client = attachment_point.vpn.create_client(host=host,\n active=True,\n client_ip=vpn_client_ip)\n interface_client = Interface.objects.create(\n border_router=BorderRouter.objects.first_or_create(host),\n public_ip=vpn_client.ip,\n public_port=public_port,\n )\n interface_ap = Interface.objects.create(\n border_router=attachment_point.get_border_router_for_useras_interface(),\n public_ip=attachment_point.vpn.server_vpn_ip\n )\n else:\n interface_client = Interface.objects.create(\n border_router=BorderRouter.objects.first_or_create(host),\n public_port=public_port,\n bind_port=bind_port\n )\n interface_ap = Interface.objects.create(\n border_router=attachment_point.get_border_router_for_useras_interface(),\n )\n\n Link.objects.create(\n type=Link.PROVIDER,\n interfaceA=interface_ap,\n interfaceB=interface_client\n )\n attachment_point.split_border_routers()\n attachment_point.trigger_deployment()\n\n return user_as\n\n def get_next_id(self):\n \"\"\"\n Get the next available UserAS id.\n \"\"\"\n max_id = self._max_id()\n if max_id is not None:\n if max_id >= USER_AS_ID_END:\n raise RuntimeError('UserAS-ID range exhausted')\n return max(USER_AS_ID_BEGIN, max_id + 1)\n else:\n return USER_AS_ID_BEGIN\n\n def _max_id(self):\n \"\"\"\n :returns: the max `as_id_int` of all UserASes, or None\n \"\"\"\n return next(iter(self.aggregate(models.Max('as_id_int')).values()), None)\n\n\nclass UserAS(AS):\n VM = 'VM'\n PKG = 'PKG'\n SRC = 'SRC'\n INSTALLATION_TYPES = (\n (VM, format_html('Run SCION in a Vagrant virtual machine '\n '(simplest approach)')),\n (PKG, 'SCION installation from packages'),\n (SRC, format_html('SCION installation from sources '\n '(for developers)')),\n )\n\n attachment_point = models.ForeignKey(\n 'AttachmentPoint',\n related_name='user_ases',\n on_delete=models.SET_NULL,\n null=True, # Null on deletion of AP\n blank=False,\n default='' # Invalid default avoids rendering a '----' selection choice\n )\n # These fields are redundant for the network model\n # They are here to retain the values entered by the user\n # if she switches to VPN and back.\n public_ip = models.GenericIPAddressField(null=True, blank=True)\n bind_ip = models.GenericIPAddressField(null=True, blank=True)\n bind_port = models.PositiveIntegerField(null=True, blank=True)\n\n installation_type = models.CharField(\n choices=INSTALLATION_TYPES,\n max_length=_MAX_LEN_CHOICES_DEFAULT,\n default=VM\n )\n\n objects = UserASManager()\n\n class Meta:\n verbose_name = 'User AS'\n verbose_name_plural = 'User ASes'\n\n def get_absolute_url(self):\n return urls.reverse('user_as_detail', kwargs={'pk': self.pk})\n\n def update(self,\n label,\n attachment_point,\n use_vpn,\n public_ip,\n public_port,\n bind_ip,\n bind_port,\n installation_type):\n \"\"\"\n Update this UserAS instance and immediately `save`.\n Updates the related host, interface and link instances and will trigger\n a configuration bump for the hosts of the affected attachment point(s).\n \"\"\"\n prev_ap = self.attachment_point\n\n host = self.hosts.get() # UserAS always has only one host\n\n if self.isd != attachment_point.AS.isd:\n self._change_isd(attachment_point.AS.isd)\n\n host.update(\n public_ip=public_ip,\n bind_ip=_VAGRANT_VM_LOCAL_IP if installation_type == UserAS.VM else bind_ip,\n 
)\n\n link = self._get_ap_link()\n interface_ap = link.interfaceA\n interface_user = link.interfaceB\n if use_vpn:\n vpn_client = self._create_or_activate_vpn_client(attachment_point.vpn)\n interface_user.update(\n public_ip=vpn_client.ip,\n public_port=public_port,\n bind_port=None\n )\n interface_ap.update(\n border_router=attachment_point.get_border_router_for_useras_interface(),\n public_ip=attachment_point.vpn.server_vpn_ip,\n public_port=None,\n )\n else:\n host.vpn_clients.update(active=False) # deactivate all vpn clients\n interface_user.update(\n public_ip=None,\n public_port=public_port,\n bind_ip=None,\n bind_port=bind_port\n )\n interface_ap.update(\n border_router=attachment_point.get_border_router_for_useras_interface(),\n public_ip=None,\n public_port=None,\n )\n self.attachment_point = attachment_point\n self.installation_type = installation_type\n self.public_ip = public_ip\n self.bind_ip = bind_ip\n self.bind_port = bind_port\n self.save()\n\n if self.attachment_point != prev_ap:\n prev_ap.split_border_routers() # clean up empty BRs\n prev_ap.trigger_deployment()\n self.attachment_point.split_border_routers()\n self.attachment_point.trigger_deployment()\n\n def is_use_vpn(self):\n \"\"\"\n Is this UserAS currently configured with VPN?\n \"\"\"\n return VPNClient.objects.filter(host__AS=self, active=True).exists()\n\n def is_active(self):\n \"\"\"\n Is this UserAS currently active?\n \"\"\"\n return self.interfaces.get().link().active\n\n def get_public_port(self):\n return self.interfaces.get().public_port\n\n def update_active(self, active):\n \"\"\"\n Set the UserAS to be active/inactive.\n This will trigger a deployment of the attachment point configuration.\n \"\"\"\n self.interfaces.get().link().update_active(active)\n self.attachment_point.trigger_deployment()\n\n def _get_ap_link(self):\n # FIXME(matzf): find the correct link to the AP if multiple links present!\n return self.interfaces.get().link()\n\n def _create_or_activate_vpn_client(self, vpn):\n \"\"\"\n Get or create the VPN client config for the given VPN.\n Deactivate all other VPN clients configured on this host.\n :param VPN vpn:\n :returns: VPNClient\n \"\"\"\n host = self.hosts.get()\n host.vpn_clients.exclude(vpn=vpn).update(active=False)\n vpn_client = host.vpn_clients.filter(vpn=vpn).first()\n if vpn_client:\n if not vpn_client.active:\n vpn_client.active = True\n vpn_client.save()\n return vpn_client\n else:\n return vpn.create_client(host, True)\n\n\nclass AttachmentPoint(models.Model):\n AS = models.OneToOneField(\n AS,\n related_name='attachment_point_info',\n on_delete=models.CASCADE,\n )\n vpn = models.OneToOneField(\n 'VPN',\n null=True,\n blank=True,\n related_name='+',\n on_delete=models.SET_NULL\n )\n\n def __str__(self):\n return str(self.AS)\n\n def get_border_router_for_useras_interface(self):\n \"\"\"\n Selects the preferred border router on which the Interfaces to UserASes should be configured\n\n Note: the border router effectively used will be be overwritten by `split_border_routers`.\n\n :returns: a `BorderRouter` of the related `AS`\n \"\"\"\n host = self._get_host_for_useras_attachment()\n return BorderRouter.objects.first_or_create(host)\n\n def check_vpn_available(self):\n \"\"\"\n Raise ValidationError if the attachment point does not support VPN.\n \"\"\"\n if self.vpn is None:\n raise ValidationError(\"Selected attachment point does not support VPN\",\n code='attachment_point_no_vpn')\n\n def trigger_deployment(self):\n \"\"\"\n Trigger the deployment for the attachment 
point configuration (after the current transaction\n is successfully committed).\n\n The deployment is rate limited, max rate controlled by\n settings.DEPLOYMENT_PERIOD.\n \"\"\"\n for host in self.AS.hosts.iterator():\n transaction.on_commit(lambda: scionlab.tasks.deploy_host_config(host))\n\n def supported_ip_versions(self):\n \"\"\"\n Returns the IP versions for the host where the user ASes will attach to\n \"\"\"\n host = self._get_host_for_useras_attachment()\n return {ipaddress.ip_address(host.public_ip).version}\n\n def _get_host_for_useras_attachment(self):\n \"\"\"\n Finds the host where user ASes would attach to\n \"\"\"\n if self.vpn is not None:\n assert(self.vpn.server.AS == self.AS)\n host = self.vpn.server\n else:\n host = self.AS.hosts.filter(public_ip__isnull=False)[0]\n return host\n\n def split_border_routers(self, max_ifaces=10):\n \"\"\"\n This is a workaround for an (apparent) issue with the border router, that cannot handle more\n than ~12 interfaces per process; this problem seemed to be fixed but is apparently still\n here. This is a hopefully temporary patch.\n\n Will create / remove border routers so no one of them has more than\n the specified limit of interfaces. The links to parent ASes will\n always remain in a different border router.\n :param int max_ifaces The maximum number of interfaces per BR\n \"\"\"\n host = self._get_host_for_useras_attachment()\n # find the *active* interfaces attaching for UserASes (attaching_ifaces) and the rest\n # (infra_ifaces)\n ifaces = host.interfaces.active().order_by('interface_id')\n attaching_ifaces = ifaces.filter(\n link_as_interfaceA__type=Link.PROVIDER,\n link_as_interfaceA__interfaceB__AS__owner__isnull=False)\n infra_ifaces = ifaces.exclude(pk__in=attaching_ifaces)\n\n # attaching non children all to one BR:\n infra_br = BorderRouter.objects.first_or_create(host)\n brs_to_delete = list(\n host.border_routers.order_by('pk').exclude(pk=infra_br.pk).values_list('pk', flat=True))\n brs_to_delete.reverse()\n infra_ifaces.update(border_router=infra_br)\n # attaching children to several BRs:\n attaching_ifaces = attaching_ifaces.all()\n for i in range(0, len(attaching_ifaces), max_ifaces):\n if brs_to_delete:\n br = BorderRouter.objects.get(pk=brs_to_delete.pop())\n else:\n br = BorderRouter.objects.create(host=host)\n for j in range(i, min(len(attaching_ifaces), i + max_ifaces)):\n iface = attaching_ifaces[j]\n iface.border_router = br\n iface.save()\n br.save()\n # squirrel away the *inactive* interfaces somewhere...\n host.interfaces.inactive().update(border_router=infra_br)\n\n # delete old BRs\n if brs_to_delete:\n BorderRouter.objects.filter(pk__in=brs_to_delete).delete()\n","sub_path":"scionlab/models/user_as.py","file_name":"user_as.py","file_ext":"py","file_size_in_byte":15049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"209016764","text":"import uuid\nfrom django.db import models\nimport django_auth_ldap.backend\nfrom django.conf import settings\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom notifier.events import SendEmailEvent\nfrom notifier.tasks import send_notification\n\n\nclass BaseModel(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created_date = models.DateTimeField(auto_now_add=True, db_index=True)\n modified_date = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\n\nclass 
MskUser(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n groups = models.CharField(max_length=500)\n\n\nclass UserRegistrationRequest(models.Model):\n first_name = models.CharField(max_length=50)\n last_name = models.CharField(max_length=50)\n username = models.CharField(max_length=50, unique=True)\n approved = models.BooleanField(default=None, null=True, blank=True)\n\n def save(self, *args, **kwargs):\n if self.pk is None:\n content = \"User %s %s, with email %s@mskcc.org requested Voyager access.\" % (\n self.first_name,\n self.last_name,\n self.username,\n )\n for email in settings.BEAGLE_NOTIFIER_EMAIL_ABOUT_NEW_USERS.split(\",\"):\n email = SendEmailEvent(\n job_notifier=settings.BEAGLE_NOTIFIER_EMAIL_GROUP,\n email_to=email,\n email_from=settings.BEAGLE_NOTIFIER_EMAIL_FROM,\n subject=\"Registration access\",\n content=content,\n )\n send_notification.delay(email.to_dict())\n super(UserRegistrationRequest, self).save(*args, **kwargs)\n\n\n@receiver(post_save, sender=User)\ndef create_user_profile(sender, instance, created, **kwargs):\n if created:\n MskUser.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_user_profile(sender, instance, **kwargs):\n instance.mskuser.groups = \"mskcc\"\n instance.mskuser.save()\n\n\ndef populate_user_profile(sender, user=None, ldap_user=None, **kwargs):\n user.email = ldap_user._user_dn\n user.save()\n\n\ndjango_auth_ldap.backend.populate_user.connect(populate_user_profile)\n","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"52154163","text":"from tkinter import Tk, Canvas,CHORD\nmyTk = Tk()\nsmiling = False\ncanvas = Canvas(myTk, width=400, height=400)\ncanvas.pack()\n\ndef mouseclick(event):\n global smiling\n smiling = True\n\ndef mouserelease(event):\n global smiling\n smiling = False\n\ndef drawcharacter():\n canvas.create_oval(150, 150, 250, 250, fill=\"yellow\", width=0)\n canvas.create_rectangle(150, 200, 250, 300, fill=\"yellow\", outline=\"yellow\")\n canvas.create_rectangle(150, 195, 250, 205, fill=\"black\")\n canvas.create_oval(175, 175, 225, 225, fill=\"gray\", width=0)\n canvas.create_oval(180, 180, 220, 220, fill=\"white\", width=0)\n canvas.create_oval(195, 195, 205, 205, fill=\"black\", width=0)\n\ndef drawmouth(smile):\n if smile:\n canvas.create_arc(175, 230, 225, 260, extent=-180, style=CHORD, fill=\"black\")\n else:\n canvas.create_rectangle(175, 250, 225, 255, fill=\"black\")\n\nmyTk.bind(\"\", mouseclick)\nmyTk.bind(\"\", mouserelease)\n\nwhile True:\n canvas.delete(\"all\")\n drawcharacter()\n drawmouth(smiling)\n myTk.update()\n \nmyTk.destroy()\n\n\n","sub_path":"14_tkinter_minion_character.py","file_name":"14_tkinter_minion_character.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"218063780","text":"# Recurrent Neural Network\n\n# Part 1 - Data Preprocessing\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport os\nimport sys\nfrom rd_conf import loadConf\nfrom pathlib import Path\n\n# sys.path.append(Path('.'))\nsys.path.append(os.path.dirname(__file__))\n\n# load config parameters\nconf = loadConf('./conf/pqsConfig.json')\nseqN = conf['seqN']\nepochsVal = conf['epochs']\nbs = conf['batchSize']\n\n# Importing the training set\ndataset_train = 
pd.read_csv('datasets/japan-225-futures-trend-trn.csv', encoding=\"UTF-8\",\n header=None, names=['time', 'trend', 'price'])\ntrn_sz = dataset_train.shape[0]\ntraining_set = dataset_train.iloc[:, 2:3].values\n\n# Feature Scaling\nfrom sklearn.preprocessing import MinMaxScaler\n\nsc = MinMaxScaler(feature_range=(0, 1))\ntraining_set_scaled = sc.fit_transform(training_set)\n\n# Creating a data structure with 60 timesteps and 1 output\nX_train = []\ny_train = []\nfor i in range(seqN, trn_sz - 1):\n X_train.append(training_set_scaled[i - seqN:i, 0])\n y_train.append(training_set_scaled[i, 0])\nX_train, y_train = np.array(X_train), np.array(y_train)\n\n# Reshaping\nX_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))\n\n# Part 2 - Building the RNN\n\n# Importing Tensorflow\nimport tensorflow as tf\n\n# Initialising the RNN\nregressor = tf.keras.models.Sequential()\n\n# Adding the first LSTM layer and some Dropout regularisation\nregressor.add(tf.keras.layers.LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1], 1)))\nregressor.add(tf.keras.layers.Dropout(0.2))\n\n# Adding a second LSTM layer and some Dropout regularisation\nregressor.add(tf.keras.layers.LSTM(units=50, return_sequences=True))\nregressor.add(tf.keras.layers.Dropout(0.2))\n\n# Adding a third LSTM layer and some Dropout regularisation\nregressor.add(tf.keras.layers.LSTM(units=50, return_sequences=True))\nregressor.add(tf.keras.layers.Dropout(0.2))\n\n# Adding a fourth LSTM layer and some Dropout regularisation\nregressor.add(tf.keras.layers.LSTM(units=50))\nregressor.add(tf.keras.layers.Dropout(0.2))\n\n# Adding the output layer\nregressor.add(tf.keras.layers.Dense(units=1))\n\n# Compiling the RNN\nregressor.compile(optimizer='adam', loss='mean_squared_error')\n\n# Fitting the RNN to the Training set\nregressor.fit(X_train, y_train, epochs=epochsVal, batch_size=bs)\n\n# Part 3 - Making the predictions and visualising the results\n\n# Getting the real stock price of 2017\ndataset_test = pd.read_csv('datasets/japan-225-futures-trend-tst.csv', encoding=\"UTF-8\",\n header=None, names=['time', 'trend', 'price'])\ntst_sz = dataset_test.shape[0]\nreal_stock_price = dataset_test.iloc[:, 2:3].values\n\n# Getting the predicted stock price of 2017\ndataset_total = pd.concat((dataset_train['price'], dataset_test['price']), axis=0)\ninputs = dataset_total[len(dataset_total) - len(dataset_test) - seqN:].values\ninputs = inputs.reshape(-1, 1)\ninputs = sc.transform(inputs)\nX_test = []\nfor i in range(seqN, seqN+tst_sz):\n X_test.append(inputs[i - seqN:i, 0])\nX_test = np.array(X_test)\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\npredicted_stock_price = regressor.predict(X_test)\npredicted_stock_price = sc.inverse_transform(predicted_stock_price)\n\n# Visualising the results\nplt.plot(real_stock_price, color='red', label='Real Nikkei 225 Future Price')\nplt.plot(predicted_stock_price, color='blue', label='Predicted Nikkei 225 Future Price')\nplt.title('Nikkei 225 Future Price Prediction')\nplt.xlabel('Time')\nplt.ylabel('Nikkei 225 Future Price')\nplt.legend()\n# plt.show()\n\n# env MPLBACKEND=Agg\nplt.savefig(\"test.png\")\n","sub_path":"tf/sante/future/rnn_pqs.py","file_name":"rnn_pqs.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"217226964","text":"import com.ihsan.foundation.pobjecthelper as phelper\n\ndef FormSetDataEx(uideflist,parameters):\n if parameters.DatasetCount == 0: 
return\n\n    param = parameters.FirstRecord.BranchCode\n    key = 'PObj:BranchBankPIC#BRANCHCODE=%s'% (param)\n    uideflist.SetData('uiPIC', key)\n\n    uip = uideflist.uipart1\n    rec = uip.Dataset.AddRecord()\n    rec.BranchCode = param\n\n\ndef cariPIC(config, parameter, returnpacket):\n    status = returnpacket.CreateValues(\n        ['IsErr',0],\n        ['ErrMessage',''],\n        ['BranchCode',''],\n        ['Pesan',0],\n    )\n    try:\n        param = parameter.FirstRecord\n        BranchCode=param.BranchCode\n        sOQL = \" \\\n            select from BranchBankPIC \\\n            [ BranchCode = :BranchCode ] \\\n            ( BranchCode, \\\n              BranchName, \\\n              self ); \\\n        \"\n\n        oql = config.OQLEngine.CreateOQL(sOQL)\n        oql.SetParameterValueByName('BranchCode', BranchCode)\n        oql.ApplyParamValues()\n\n        oql.active = 1\n        ds = oql.rawresult\n\n        if ds.BranchCode in (None,'') :\n            status.Pesan = 0\n            #raise 'aa', 'kosong'\n        else :\n            status.Pesan = 1\n            status.BranchCode = ds.BranchCode\n\n    except:\n        import sys  # sys is used below but was never imported at module level\n        status.IsErr = 1\n        status.ErrMessage = str(sys.exc_info()[1])\n        raise\n\n","sub_path":"dialogs/LKMS/fPeragaanPICCabang_data.py","file_name":"fPeragaanPICCabang_data.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"572513968","text":"\nfrom api_calls import get, stream, post\nimport json\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport strategies\nimport matplotlib.pyplot as plt\n\n\nclass FXBot(object):\n    def __init__(self):\n        self.position = 0\n        self.df = pd.DataFrame()\n        self.connected = True\n        self.balance = self.get_account_balance()\n        self.units = self.balance / 100.0\n\n    @staticmethod\n    def get_account_balance(**params):\n        # Get account balance\n        summary = get(\"summary\", params)\n        balance = pd.to_numeric(summary['account']['balance'])\n        return balance\n\n    def update_account_balance(self):\n        # Update account balance\n        self.balance = self.get_account_balance()\n\n    def update_units(self):\n        # Update units\n        self.units = self.balance / 100.0\n\n    def update_account_data(self):\n        # Update account data\n        self.update_account_balance()\n        self.update_units()\n\n    @staticmethod\n    def get_position(**params):\n        # Get positions from api\n        positions = get(\"openPositions\", params)\n        return positions\n\n    @staticmethod\n    def get_instrument(**params):\n        # Get instrument from api\n        instrument = get(\"instruments\", params)\n        return instrument\n\n    @staticmethod\n    def get_price(**params):\n        # Get price stream for given instrument\n        price = get(\"pricing\", params)\n        return price\n\n    @staticmethod\n    def create_order(side, instrument, units, type='MARKET'):\n        if side == 'buy':\n            units = abs(int(units))\n        elif side == 'sell':\n            units = int(units) * -1\n        else:\n            raise Exception('order type not recognised, use buy or sell!')\n\n        params = {\n            'instrument': instrument,\n            'units': units,\n            'type': type\n        }\n\n        # Create an order\n        order = post(\"orders\", params)\n\n        print('\\n', order)\n\n    def on_success(self, data, instrument, strat, strat_params):\n        # appends the new tick data to the DataFrame object\n        self.df = self.df.append(data)\n        # transforms the time information to a DatetimeIndex object\n        self.df.index = pd.DatetimeIndex(self.df['time'])\n        # resamples the data set to a new, homogeneous interval\n        dfr = self.df.resample('1T').last().dropna()\n\n        # run the given strategy\n        strategy_to_run = getattr(strategies, strat)\n        output = strategy_to_run(dfr, strat_params)\n\n        if output == 'buy':\n            # go long\n            if self.position == 0:\n                self.create_order('buy', instrument, self.units)\n            elif 
self.position == -1:\n self.create_order('buy', instrument, self.units * 2)\n self.position = 1\n elif output == 'sell':\n # go short\n if self.position == 0:\n self.create_order('sell', instrument, self.units)\n elif self.position == 1:\n self.create_order('sell', instrument, self.units * 2)\n self.position = -1\n\n def get_history(self, **params):\n # Get price history for given instrument\n history = get(\"candles\", params)['candles']\n\n # Insert data in DataFrame\n df = pd.DataFrame(history)\n\n # pull out the close ask prices\n df['closeAsk'] = df['ask'].apply(self.get_close)\n df['closeBid'] = df['bid'].apply(self.get_close)\n df['closeMid'] = df['mid'].apply(self.get_close)\n\n # convert columns to required formats\n df['time'] = pd.to_datetime(df['time'])\n df['closeAsk'] = pd.to_numeric(df['closeAsk'])\n df['closeBid'] = pd.to_numeric(df['closeBid'])\n df['closeMid'] = pd.to_numeric(df['closeMid'])\n\n return df[['closeAsk', 'closeMid', 'closeBid', 'time']]\n\n def seed_history(self, data):\n if not self.df.empty:\n raise Exception('DataFrame not empty, clear first!')\n\n data.rename(columns={'closeAsk': 'ask', 'closeBid': 'bid'}, inplace=True)\n\n self.df = data\n\n @staticmethod\n def test_strategy(input_df, momentum_list):\n sns.set()\n input_df['returns'] = np.log(input_df['closeAsk'] / input_df['closeAsk'].shift(1))\n\n cols = []\n for m in momentum_list:\n col = 'position_%s' % m\n input_df[col] = np.sign(input_df['returns'].rolling(m).mean())\n cols.append(col)\n\n strats = ['returns']\n\n for col in cols:\n strat = 'strategy_%s' % col.split('_')[1]\n input_df[strat] = input_df[col].shift(1) * input_df['returns']\n strats.append(strat)\n\n input_df[strats].dropna().cumsum().apply(np.exp).plot()\n plt.show()\n\n @staticmethod\n def get_close(dic):\n return dic.get('c', None)\n\n def get_price_stream(self, **params):\n # Get price stream for given instrument\n price_stream = stream(\"pricing/stream\", params)\n\n instrument = params.get('instruments')\n\n strategy = params.get('strategy')\n\n strat_params = params.get('strat_params')\n\n for line in price_stream.iter_lines():\n if not self.connected:\n break\n if line:\n resp = json.loads(line.decode(\"utf-8\"))\n if resp['type'] == 'HEARTBEAT':\n pass\n elif resp['type'] == 'PRICE':\n ask = float(resp['asks'][0]['price'])\n bid = float(resp['bids'][0]['price'])\n time = pd.to_datetime(resp['time'])\n df = pd.DataFrame([{'bid': bid, 'ask': ask, 'time': time}])\n self.on_success(df, instrument, strategy, strat_params)\n\n def disconnect(self):\n self.connected = False\n","sub_path":"fx_bot/fx_bot.py","file_name":"fx_bot.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190316862","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom modules.main import Main\nimport time\n\n__author__ = \"Ram Prakash Jayapalan\"\n__copyright__ = \"Copyright 2017\"\n__license__ = 'MIT'\n__lastupdated__ = \"Jun 26, 2017\"\n__pyversion__ = \"3.6.x\"\n\n\nclass App(object):\n\n def __init__(self, configfile, testfile, nice):\n \"\"\"\n Constructor\n :param configfile: Full path to the config file\n :param testfile: Full path to the test file\n \"\"\"\n self.configfile = configfile\n self.testfile = testfile\n self.nice = nice\n\n def validate(self):\n \"\"\"\n Function to validate the test file\n :return: result\n \"\"\"\n result = Main(self.configfile, self.testfile, self.nice).getresult()\n return result\n\n\nif __name__ == \"__main__\":\n\n 
parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", \"-c\")\n parser.add_argument(\"--test\", \"-t\")\n parser.add_argument(\"--nice\", action=\"store_true\")\n\n args = parser.parse_args()\n\n configfile = args.config\n testfile = args.test\n nice = args.nice\n\n starttime = time.time()\n\n app = App(configfile, testfile, nice).validate()\n\n runtime = (time.time() - starttime) // 60 # runtime in mins\n\n print(app)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"89317881","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass AccountInvoice(models.Model):\n _inherit = \"account.invoice\"\n\n @api.model\n def _get_default_warehouse(self):\n warehouse_id = self.env['stock.warehouse'].search([('company_id', '=', self.env.user.company_id.id)], limit=1)\n return warehouse_id\n\n @api.model\n def create_move(self, invoice, picking_type_id, location_id, location_dest_id):\n StockMove = self.env['stock.move']\n picking_id = self.env['stock.picking'].create({\n 'partner_id': invoice.partner_id.id,\n 'date': fields.datetime.now(), \n 'company_id': invoice.company_id.id,\n 'picking_type_id': picking_type_id.id,\n 'location_id': location_id.id,\n 'location_dest_id': location_dest_id.id,\n 'state': 'draft',\n 'origin': invoice.number,\n })\n\n invoice.picking_id = picking_id.id\n for line in invoice.invoice_line_ids:\n if line.product_id and line.product_id.type != 'service':\n StockMove.create({\n 'product_id': line.product_id.id,\n 'product_uom_qty': line.quantity,\n 'product_uom': line.product_id.uom_id.id,\n 'date': fields.datetime.now(),\n 'date_expected': fields.datetime.now(),\n 'picking_id': picking_id.id,\n 'state': 'draft',\n 'name': line.name,\n 'location_id': location_id.id,\n 'location_dest_id': location_dest_id.id,\n 'quantity_done': line.quantity,\n })\n '''picking_id.action_confirm()\n picking_id.action_assign()\n if picking_id.state != 'assigned':\n picking_id.force_assign()\n picking_id.button_validate()'''","sub_path":"addons/invoice_with_stock_move_lote_and_series/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601357309","text":"from django.conf import settings\nfrom django.dispatch import receiver\nfrom django.db.models import signals\n\nfrom problem.models import Problem\n\n\n@receiver(signals.pre_save, sender=Problem)\ndef update_difficulty(sender, instance=None, **kwargs):\n \"\"\"Dynamically update difficulty of problem.\"\"\"\n # Only submission count bigger than base count will trigger this\n # action.\n base_count = settings.DIFFICULTY_BASE_SUBMISSIONS_COUNT\n\n # Invalid problem is not in account.\n if getattr(instance, 'pk', None) \\\n and instance.is_valid \\\n and instance.submission_number >= base_count:\n rate = instance.accepted_number / instance.submission_number\n for difficulty, level in settings.DIFFICULTY_RATE_MAP.items():\n if rate <= level:\n instance.difficulty = difficulty\n\n return instance\n","sub_path":"problem/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"27332473","text":"\"\"\"\n This is dqn which has Q and D estimator.\n Different from V1, we give an agent penalty(reward) equal 1.0 at 
terminal state.\n More reward, more chance to dies (because of discount factor).\n The agent must choose the action that provides the greatest U-value.\n where\n U = Q - bD ; b is constant (change plus sign to minus)\n\n This version also add sigmoid function to the D network output\n to prevent bootstrap error (D is negative) from V2.\n\"\"\"\n\nimport os\nimport time\nimport random\nimport cv2\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nfrom baselines.common import set_global_seeds, explained_variance\nfrom baselines.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\n\nfrom utils import *\n\n\nclass Model(object):\n def __init__(self, num_actions, death_constant, risk_constant, sess=None, multi_gpu=False, no_gpu=False):\n if multi_gpu:\n device_1 = '/device:GPU:0'\n device_2 = '/device:GPU:1'\n else:\n device_1 = '/device:GPU:0'\n device_2 = '/device:GPU:0'\n\n if no_gpu:\n device_1 = '/cpu:0'\n device_2 = '/cpu:0'\n\n with tf.variable_scope('main'):\n with tf.device(device_1):\n # make it int32 and divide by 255.\n self.x = x = tf.placeholder(tf.uint8, [None, 84, 84, 4], name=\"input\")\n self.batch_size = tf.shape(x)[0] # = (nenvs) if perform action || = (args.bs) if train replay buffer\n x = tf.cast(x, tf.float32)/255.\n\n # convolution layer\n x = tf.nn.relu( conv2d(x, 32, \"l1\", [8,8], [4,4]) )\n x = tf.nn.relu( conv2d(x, 64, \"l2\", [4,4], [2,2]) )\n conv_out = tf.nn.relu( conv2d(x, 64, \"l3\", [3,3], [1,1]) )\n\n # fully connected layer\n x = tf.nn.relu(linear(flatten(conv_out), 512, \"hidden\", normalized_columns_initializer(1.0)))\n self.q_values = linear(x, num_actions, \"q_out\", normalized_columns_initializer(1.0))\n\n # death predictor\n x = tf.nn.relu(linear(flatten(conv_out), 512, \"hidden2\", normalized_columns_initializer(1.0)))\n x = linear(x, num_actions, \"d_out\", normalized_columns_initializer(1.0))\n self.d_values = tf.math.sigmoid(x, \"sigmoid\")\n\n # utility value\n x = tf.nn.relu(linear(flatten(conv_out), 512, \"hidden3\", normalized_columns_initializer(1.0)))\n self.u_values = linear(x, num_actions, \"u_out\", normalized_columns_initializer(1.0))\n\n self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)\n\n with tf.variable_scope('target'):\n with tf.device(device_2):\n self.x_target = x = tf.placeholder(tf.uint8, [None, 84, 84, 4], name=\"input_target\")\n x = tf.cast(x, tf.float32)/255.\n\n # convolution layer\n x = tf.nn.relu( conv2d(x, 32, \"l1\", [8,8], [4,4]) )\n x = tf.nn.relu( conv2d(x, 64, \"l2\", [4,4], [2,2]) )\n conv_out = tf.nn.relu( conv2d(x, 64, \"l3\", [3,3], [1,1]) )\n\n # fully connected layer\n x = tf.nn.relu(linear(flatten(conv_out), 512, \"hidden\", normalized_columns_initializer(1.0)))\n self.q_values_target = linear(x, num_actions, \"q_out\", normalized_columns_initializer(1.0))\n\n # death predictor\n x = tf.nn.relu(linear(flatten(conv_out), 512, \"hidden2\", normalized_columns_initializer(1.0)))\n x = linear(x, num_actions, \"d_out\", normalized_columns_initializer(1.0))\n self.d_values_target = tf.math.sigmoid(x, \"sigmoid\")\n\n # utility value\n x = tf.nn.relu(linear(flatten(conv_out), 512, \"hidden3\", normalized_columns_initializer(1.0)))\n self.u_values_target = linear(x, num_actions, \"u_out\", normalized_columns_initializer(1.0))\n\n self.var_list_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)\n\n self.sync = tf.group(\n *(\n [v1.assign(v2) for v1, 
v2 in zip(self.var_list_target, self.var_list)]\n ))\n\n # act greedily with respect to the utility\n # add small random action to avoid stranded behaviour\n\n self.eps = eps = tf.placeholder(tf.float32, [1])\n # eps = 0.01\n deterministic_actions = tf.argmax(self.u_values, axis=1) # greedy\n random_actions = tf.random_uniform(tf.stack([self.batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([self.batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n self.actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n # train\n self.rewards_t = tf.placeholder(tf.float32, [None], name=\"reward\")\n self.actions_t = tf.placeholder(tf.int32, [None], name=\"action\")\n self.done_mask = tf.placeholder(tf.float32, [None], name=\"done\")\n\n #######################################################################\n # This section compute Q-learning target and error\n #######################################################################\n\n # q scores for actions, we know were selected\n q_t_selected = tf.reduce_sum(self.q_values * tf.one_hot(self.actions_t, num_actions), 1)\n # target\n q_target = self.q_values_target\n u_target = self.u_values_target\n # select best action with utility head (similar to double dqn)\n tp1_best_actions = tf.argmax(u_target, 1)\n\n q_tp1_best = tf.reduce_sum(q_target * tf.one_hot(tp1_best_actions, num_actions), 1)\n q_tp1_best_masked = (1.0 - self.done_mask) * q_tp1_best\n\n # compute RHS of bellman equation\n gamma = 0.995 # was 0.99\n q_t_selected_target = self.rewards_t + gamma * q_tp1_best_masked\n\n # compute error\n td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)\n\n # huber loss\n delta = 1.0\n errors = tf.where(tf.abs(td_error) < delta,\n tf.square(td_error) * 0.5,\n delta * (tf.abs(td_error) - 0.5 * delta))\n\n errors = tf.reduce_mean(errors)\n\n ##################################################################\n # This section compute Death estimation\n ###################################################################\n\n # q scores for actions, we know were selected\n self.d_t_selected = tf.reduce_sum(self.d_values * tf.one_hot(self.actions_t, num_actions), 1)\n # target\n d_target = self.d_values_target\n\n d_tp1_best = tf.reduce_sum(d_target * tf.one_hot(tp1_best_actions, num_actions), 1)\n d_tp1_best_masked = (1.0 - self.done_mask) * d_tp1_best\n\n # define death reward\n r = self.done_mask\n\n # compute RHS of bellman equation\n gamma = 0.995 # was 0.99\n d_t_selected_target = r + gamma * d_tp1_best_masked\n\n # compute error\n d_td_error = self.d_t_selected - tf.stop_gradient(d_t_selected_target)\n\n # huber loss\n delta = 1.0\n d_errors = tf.where(tf.abs(d_td_error) < delta,\n tf.square(d_td_error) * 0.5,\n delta * (tf.abs(d_td_error) - 0.5 * delta))\n\n d_errors = tf.reduce_mean(d_errors)\n\n ####################################################################\n # This section compute utility value target and error\n ####################################################################\n b = death_constant # death constant\n u_t_selected = tf.reduce_sum(self.u_values * tf.one_hot(self.actions_t, num_actions), 1)\n u_t_selected_target = q_t_selected_target - (b * d_t_selected_target)\n\n # compute error\n u_td_error = u_t_selected - tf.stop_gradient(u_t_selected_target)\n # huber loss\n u_errors = tf.where(tf.abs(u_td_error) < delta,\n tf.square(u_td_error) * 0.5,\n delta * (tf.abs(u_td_error) - 0.5 * delta))\n\n u_errors = tf.reduce_mean(u_errors)\n\n 
###################################################################\n\n lr = 0.0001 # 1.0e-4\n optimizer = tf.train.AdamOptimizer(learning_rate=lr)\n self.optimize_expr = optimizer.minimize(errors + d_errors + u_errors)\n\n self.sess = sess\n tf.global_variables_initializer().run(session=self.sess)\n\n tf.summary.scalar(\"model/q_loss\", errors)\n tf.summary.scalar(\"model/d_loss\", d_errors)\n tf.summary.scalar(\"model/u_loss\", u_errors)\n tf.summary.scalar(\"model/mean_q_values\", tf.reduce_mean(q_t_selected))\n tf.summary.scalar(\"model/mean_d_values\", tf.reduce_mean(self.d_t_selected))\n tf.summary.scalar(\"model/mean_u_values\", tf.reduce_mean(u_t_selected))\n self.summary_op = tf.summary.merge_all()\n\n def act(self, obs, epsilon):\n # sample an action\n return self.sess.run(self.actions,\n feed_dict={self.x: obs, self.eps: epsilon})\n # feed_dict={self.x : obs})\n\n\n def train(self, obses, actions, rewards, obses_tp1, dones):\n # train\n return self.sess.run([self.optimize_expr, self.summary_op],\n feed_dict={self.x : obses,\n self.actions_t : actions,\n self.rewards_t : rewards,\n self.x_target : obses_tp1,\n self.done_mask : dones\n })\n\n def update_target(self):\n return self.sess.run(self.sync)\n\n\nclass ReplayBuffer(object):\n def __init__(self, size):\n self._storage = []\n self._maxsize = size # 200,000\n self._next_idx = 0\n\n def __len__(self):\n return len(self._storage)\n\n def add(self, obs_t, action, reward, obs_tp1, done):\n # oldest data will be replaced\n data = (obs_t, action, reward, obs_tp1, done)\n if self._next_idx >= len(self._storage):\n self._storage.append(data)\n else:\n self._storage[self._next_idx] = data\n self._next_idx = (self._next_idx + 1) % self._maxsize\n\n def add_batch(self, obs_t, actions, rewards, obs_tp1, dones):\n for i in range(len(dones)):\n self.add(obs_t[i], actions[i], rewards[i], obs_tp1[i], dones[i])\n\n def _encode_sample(self, idxes):\n obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []\n for i in idxes:\n data = self._storage[i]\n obs_t, action, reward, obs_tp1, done = data\n obses_t.append(np.array(obs_t, copy=False))\n actions.append(np.array(action, copy=False))\n rewards.append(reward)\n obses_tp1.append(np.array(obs_tp1, copy=False))\n dones.append(done)\n return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)\n\n def sample(self, batch_size):\n idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]\n return self._encode_sample(idxes)\n\n\nclass LinearSchedule(object):\n def __init__(self, schedule_timesteps, final_p, initial_p=1.0):\n self.schedule_timesteps = schedule_timesteps\n self.final_p = final_p\n self.initial_p = initial_p\n\n def value(self, t):\n fraction = min(float(t) / self.schedule_timesteps, 1.0)\n return self.initial_p + fraction * (self.final_p - self.initial_p)\n\n\nclass DQN:\n def __init__(self,\n env,\n model,\n summary_writer,\n batch_size,\n max_timesteps,\n train_freq,\n learning_starts,\n target_network_update_freq,\n buffer_size,\n nstack,\n visualise=False,\n logdir=None,\n sess=None,\n saver=None,\n ):\n\n self.env = env\n self.nenvs = env.num_envs\n self.model = model\n nh, nw, nc = env.observation_space.shape\n\n # Create the replay buffer\n self.replay_buffer = ReplayBuffer(buffer_size)\n\n # Create the schedule for exploration starting from 1.\n exploration_fraction = 0.1\n self.exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * max_timesteps),\n initial_p=1.0,\n final_p=0.01)\n\n # 
Initialise observation\n self.obs = np.zeros((self.nenvs, nh, nw, nstack), dtype=np.uint8)\n obs = env.reset()\n self.update_obs(obs)\n self.next_obs = np.zeros((self.nenvs, nh, nw, nstack), dtype=np.uint8)\n self.update_next_obs(obs)\n self.tstart = time.time()\n self.batch_size = batch_size\n self.max_timesteps = max_timesteps\n self.learning_starts = learning_starts\n self.target_network_update_freq = target_network_update_freq\n self.train_freq = train_freq\n self.summary_writer = summary_writer\n self.visualise = visualise\n self.saver = saver\n self.logdir = logdir\n self.previous_reward = 0.0\n self.sess = sess\n\n def update_obs(self, obs):\n self.obs = np.roll(self.obs, shift=-1, axis=3)\n self.obs[:, :, :, -1] = obs[:, :, :, 0]\n\n def update_next_obs(self, obs):\n self.next_obs = np.roll(self.next_obs, shift=-1, axis=3)\n self.next_obs[:, :, :, -1] = obs[:, :, :, 0]\n\n def learn(self):\n\n # Initial setup\n episode_reward = 0.0\n episode_clip_reward = 0.0\n episode_length = 0.0\n\n for t in range( self.max_timesteps//self.nenvs + 1):\n # choose actions\n actions = self.model.act(self.obs, [self.exploration.value(int(t*self.nenvs))])\n # actions = self.model.act(self.obs)\n\n # act on env\n obs, rewards, dones, _ = self.env.step(actions)\n\n # clip rewards\n clip_rewards = np.sign(rewards)\n\n # Store transition in the replay buffer\n for n, done in enumerate(dones):\n if done:\n self.next_obs[n] = self.next_obs[n]*0\n self.update_next_obs(obs)\n\n self.replay_buffer.add_batch(self.obs, actions, clip_rewards, self.next_obs, dones)\n\n for n, done in enumerate(dones):\n if done:\n self.obs[n] = self.obs[n]*0\n self.update_obs(obs)\n\n if t > self.learning_starts and t % self.train_freq == 0:\n # Train network periodically\n train_obses_t, train_actions, train_rewards, \\\n train_obses_tp1, train_dones = self.replay_buffer.sample(self.batch_size)\n _, summary = self.model.train(train_obses_t,\n train_actions,\n train_rewards,\n train_obses_tp1,\n train_dones)\n\n self.summary_writer.add_summary(summary, global_step=t*self.nenvs)\n self.summary_writer.flush()\n\n if t > self.learning_starts and t % self.target_network_update_freq == 0:\n # Update target network periodically.\n self.model.update_target()\n\n # collect summary\n episode_reward += rewards[0]\n episode_clip_reward += clip_rewards[0]\n episode_length += 1\n\n # Report summary\n if dones[0]:\n print(\"done : %d\"%t)\n nseconds = time.time()-self.tstart\n fps = int((t*self.nenvs)/nseconds)\n # summary\n summary = tf.Summary()\n summary.value.add(tag='global/episode_reward', simple_value=episode_reward)\n summary.value.add(tag='global/episode_cliped_reward', simple_value=episode_clip_reward)\n summary.value.add(tag='global/episode_length', simple_value=episode_length)\n summary.value.add(tag='global/fps', simple_value=fps)\n self.summary_writer.add_summary(summary, global_step=t*self.nenvs)\n self.summary_writer.flush()\n\n # save best model\n if episode_reward > self.previous_reward:\n self.saver.save(self.sess, self.logdir + \"/best/best_model.ckpt\")\n self.previous_reward = episode_reward\n\n # reset episode_reward\n episode_reward = 0.0\n episode_clip_reward = 0.0\n episode_length = 0\n\n if t == self.max_timesteps//(2*self.nenvs):\n # save at half the training time\n self.saver.save(self.sess, self.logdir + \"/half/half_model.ckpt\")\n print('Save half training model')\n\n # visualisation for debugging process\n if self.visualise:\n vis = cv2.resize(obs[0,:,:,0] , (500,500))\n print(episode_reward)\n 
cv2.imshow('img', vis)\n cv2.waitKey(2)\n\n # save final model\n self.saver.save(self.sess, self.logdir + \"/final/final_model.ckpt\")\n print('Save final model')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--b', help='death constant', type=float, default=1)\n parser.add_argument('--bs', help='batch size', type=int, default=512)\n parser.add_argument('--c', help='risk constant', type=float, default=0.3)\n parser.add_argument('--env', help='environment ID', default='MontezumaRevengeNoFrameskip-v4')\n parser.add_argument('--gpu_id', help='gpu device ID', default=\"0\")\n parser.add_argument('--log_dir', help='experiment directory', default='./experiments')\n parser.add_argument('--multi_gpu', help='use multiple GPUs', action='store_true')\n parser.add_argument('--num_workers', help='number of workers', type=int, default=12)\n parser.add_argument('--seed', help='random seed', type=int, default=0)\n parser.add_argument('--time_steps', help='max time step for training', type=int, default=int(100e6))\n parser.add_argument('--visualise', help='show game screen', action='store_true')\n args = parser.parse_args()\n print(\"Initialise environment...\")\n\n # GPUs setting\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(args.gpu_id)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n\n # Experiment results\n filename = os.path.basename(__file__)[:-3]\n experiment_name = filename + \"/\" + args.env + '/b_' + str(args.b) + '/bs_' + str(args.bs) + '/seed_' + str(args.seed)\n log_dir = os.path.join(args.log_dir, experiment_name)\n\n # Create OpenNI atari-py function\n def make_env(rank):\n def _thunk():\n env = make_atari(args.env)\n env.seed(args.seed + rank)\n return wrap_deepmind(env, episode_life=False, clip_rewards=False)\n return _thunk\n\n # Create environments\n set_global_seeds(args.seed)\n env = SubprocVecEnv([make_env(i) for i in range(args.num_workers)])\n\n # Start tf session\n print(\"Starting session...\")\n with tf.Session(config=config) as sess:\n # Create neural network models\n model = Model(num_actions=env.action_space.n, # 18 actions for atari\n death_constant=args.b,\n risk_constant=args.c,\n sess=sess,\n multi_gpu=args.multi_gpu)\n\n # Start training\n summary_writer = tf.summary.FileWriter(log_dir)\n saver = tf.train.Saver()\n dqn = DQN(env,\n model,\n summary_writer,\n batch_size=args.bs,\n max_timesteps=args.time_steps,\n train_freq=4,\n learning_starts=10000,\n target_network_update_freq=2000,\n buffer_size=200000,\n nstack=4,\n visualise=args.visualise,\n logdir=log_dir,\n sess=sess,\n saver=saver)\n\n print(\"Start!\")\n print(\"========================================================\")\n dqn.learn()\n\n env.close()\n print(\"========================================================\")\n print(\"env closed!\")\n","sub_path":"dqn_QD_v2_2.py","file_name":"dqn_QD_v2_2.py","file_ext":"py","file_size_in_byte":20782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"595485242","text":"import requests\nfrom db_connector import cursor\n\n\ndef test_back(user_id, user_name):\n # POST a new user to API\n json_data = {\"user_name\": user_name}\n new_user = requests.post(\"http://127.0.0.1:5000/users/{}\".format(user_id), json=json_data)\n if new_user.status_code == 500:\n return False\n\n # GET user\n get_user = 
requests.get(\"http://127.0.0.1:5000/users/{}\".format(user_id))\n data = get_user.json()\n my_result = data['user_name']\n # Check if user name matches and response code is good\n if (my_result != json_data['user_name']) or (get_user.status_code != 200):\n return False\n\n # Query for user\n try:\n cursor.execute('SELECT user_name FROM sys.users WHERE user_id = '\"{}\"''.format(user_id))\n my_result = cursor.fetchall()\n get_error = my_result[0]\n except IndexError:\n return False\n\n return True\n\n\ntest_back(2, 'Boris')\n","sub_path":"pythonProject/backend_testing.py","file_name":"backend_testing.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596479062","text":"#!/usr/bin/env python\n\nimport argparse\nimport chainer\nimport fcis\nimport os.path as osp\n\nimport _init_paths # NOQA\n\nfrom utils.load_model import load_param\n\n\nfilepath = osp.abspath(osp.dirname(__file__))\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset')\n args = parser.parse_args()\n\n if args.dataset == 'coco':\n n_class = 81\n model = fcis.models.FCISResNet101(n_class)\n prefix = filepath + '/../model/fcis_coco'\n epoch = 0\n elif args.dataset == 'voc':\n n_class = 21\n model = fcis.models.FCISResNet101(\n n_class,\n ratios=(0.5, 1.0, 2.0),\n anchor_scales=(8, 16, 32),\n rpn_min_size=16)\n prefix = filepath + '/../model/e2e'\n epoch = 21\n else:\n print('dataset must be coco or voc')\n arg_params, aux_params = load_param(\n prefix, epoch, process=True)\n model = convert(model, arg_params, aux_params)\n chainer.serializers.save_npz(\n './fcis_{}.npz'.format(args.dataset), model)\n\n\ndef convert(model, arg_params, aux_params):\n conv_branch = {\n 'branch2a': 'conv1',\n 'branch2b': 'conv2',\n 'branch2c': 'conv3',\n 'branch1': 'conv4',\n }\n\n bn_branch = {\n 'branch2a': 'bn1',\n 'branch2b': 'bn2',\n 'branch2c': 'bn3',\n 'branch1': 'bn4',\n }\n\n # convolution weight\n for name, value in arg_params.items():\n value = value.asnumpy()\n # ResNetC1\n if name.startswith('conv1'):\n value = value[:, ::-1, :, :]\n assert model.res1.conv1.W.array.shape == value.shape, name\n model.res1.conv1.W.array = value\n # ResNetC2-5\n elif name.startswith('res'):\n block_name, branch_name, _ = name.split('_')\n res_name = block_name[:4]\n if block_name[4] == 'a':\n bottle_num = block_name[4:]\n elif block_name[4] == 'b':\n bottle_num = block_name[4:]\n if bottle_num == 'b':\n bottle_num = 'b1'\n elif block_name[4] == 'c':\n bottle_num = 'b2'\n bottle_name = '{0}_{1}'.format(res_name, bottle_num)\n res = getattr(model, res_name)\n bottle = getattr(res, bottle_name)\n layer = getattr(bottle, conv_branch[branch_name])\n assert layer.W.array.shape == value.shape, name\n layer.W.array = value\n # RPN\n elif name.startswith('rpn'):\n _, layer_name, _, data_type = name.split('_')\n if layer_name == 'conv':\n layer = model.rpn.conv1\n elif layer_name == 'cls':\n layer = model.rpn.score\n elif layer_name == 'bbox':\n layer = model.rpn.loc\n\n if data_type == 'weight':\n if layer_name == 'cls':\n value = value.reshape((2, -1, 512, 1, 1))\n value = value.transpose((1, 0, 2, 3, 4))\n value = value.reshape((-1, 512, 1, 1))\n elif layer_name == 'bbox':\n value = value.reshape((-1, 4, 512, 1, 1))\n value = value[:, [1, 0, 3, 2]]\n value = value.reshape((-1, 512, 1, 1))\n assert layer.W.array.shape == value.shape, name\n layer.W.array = value\n elif data_type == 'bias':\n if layer_name == 'cls':\n value = 
value.reshape((2, -1))\n value = value.transpose((1, 0))\n value = value.reshape((-1,))\n elif layer_name == 'bbox':\n value = value.reshape((-1, 4))\n value = value[:, [1, 0, 3, 2]]\n value = value.reshape((-1,))\n assert layer.b.array.shape == value.shape, name\n layer.b.array = value\n # psroi_conv1\n elif name.startswith('conv_new'):\n data_type = name.split('_')[3]\n layer = model.psroi_conv1\n if data_type == 'weight':\n assert layer.W.array.shape == value.shape, name\n layer.W.array = value\n elif data_type == 'bias':\n assert layer.b.array.shape == value.shape, name\n layer.b.array = value\n # psroi_conv2\n elif name.startswith('fcis_cls_seg'):\n data_type = name.split('_')[3]\n layer = model.psroi_conv2\n if data_type == 'weight':\n assert layer.W.array.shape == value.shape, name\n layer.W.array = value\n elif data_type == 'bias':\n assert layer.b.array.shape == value.shape, name\n layer.b.array = value\n # psroi_conv3\n elif name.startswith('fcis_bbox'):\n data_type = name.split('_')[2]\n layer = model.psroi_conv3\n if data_type == 'weight':\n value = value.reshape((2, 4, 7*7, 1024, 1, 1))\n value = value[:, [1, 0, 3, 2]]\n value = value.reshape((-1, 1024, 1, 1))\n assert layer.W.array.shape == value.shape, name\n layer.W.array = value\n elif data_type == 'bias':\n value = value.reshape((2, 4, 7*7))\n value = value[:, [1, 0, 3, 2]]\n value = value.reshape((-1,))\n assert layer.b.array.shape == value.shape, name\n layer.b.array = value\n else:\n layer_name, branch_name, data_type = name.split('_')\n if layer_name == 'bn':\n layer = model.res1.bn1\n if data_type == 'beta':\n assert layer.beta.array.shape == value.shape\n layer.beta.array = value\n elif data_type == 'gamma':\n assert layer.gamma.array.shape == value.shape\n layer.gamma.array = value\n else:\n res_name = 'res{}'.format(layer_name[2])\n block_name = layer_name[3:]\n if block_name[0] == 'a':\n bottle_num = block_name\n elif block_name[0] == 'b':\n bottle_num = block_name\n if bottle_num == 'b':\n bottle_num = 'b1'\n elif block_name[0] == 'c':\n bottle_num = 'b2'\n bottle_name = '{0}_{1}'.format(res_name, bottle_num)\n res = getattr(model, res_name)\n bottle = getattr(res, bottle_name)\n layer = getattr(bottle, bn_branch[branch_name])\n if data_type == 'beta':\n assert layer.beta.array.shape == value.shape, name\n layer.beta.array = value\n elif data_type == 'gamma':\n assert layer.gamma.array.shape == value.shape, name\n layer.gamma.array = value\n\n for name, value in aux_params.items():\n value = value.asnumpy()\n layer_name, branch_name, _, data_type = name.split('_')\n if layer_name == 'bn':\n layer = model.res1.bn1\n if data_type == 'var':\n assert layer.avg_var.shape == value.shape, name\n layer.avg_var = value\n elif data_type == 'mean':\n assert layer.avg_mean.shape == value.shape, name\n layer.avg_mean = value\n else:\n res_name = 'res{}'.format(layer_name[2])\n block_name = layer_name[3:]\n if block_name[0] == 'a':\n bottle_num = block_name\n elif block_name[0] == 'b':\n bottle_num = block_name\n if bottle_num == 'b':\n bottle_num = 'b1'\n elif block_name[0] == 'c':\n bottle_num = 'b2'\n bottle_name = '{0}_{1}'.format(res_name, bottle_num)\n res = getattr(model, res_name)\n bottle = getattr(res, bottle_name)\n layer = getattr(bottle, bn_branch[branch_name])\n if data_type == 'var':\n assert layer.avg_var.shape == value.shape, name\n layer.avg_var = value\n elif data_type == 'mean':\n assert layer.avg_mean.shape == value.shape, name\n layer.avg_mean = value\n return model\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"scripts/convert_model.py","file_name":"convert_model.py","file_ext":"py","file_size_in_byte":8407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"463407696","text":"import pickle\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.metrics import confusion_matrix\r\nimport numpy as np\r\nimport glob\r\nimport os.path\r\n\r\ndef SVM(key, params):\r\n [train_features, val_features, train_imgs, val_imgs, y_train, y_val] = pickle.load(open(\"results/Results_\" + str(key) + \".pickle\", 'rb'))\r\n shp = train_features[0].shape\r\n train_features = train_features[0].reshape((shp[0],(shp[1] * shp[2] *shp[3])))\r\n shp = val_features[0].shape\r\n val_features = val_features[0].reshape((shp[0],(shp[1] * shp[2] *shp[3])))\r\n clf = LinearSVC(random_state=0)\r\n clf.fit(train_features, y_train)\r\n y_pred = clf.predict(val_features)\r\n conf = confusion_matrix(y_val, y_pred)\r\n print(conf)\r\n confSum = 0\r\n diagonalSum = 0\r\n classWiseAcc = []\r\n res = []\r\n resDict = {}\r\n for i in range(0, len(conf)):\r\n confSum += sum(conf[i])\r\n diagonalSum += conf[i][i]\r\n classWiseAcc.append(conf[i][i] / float(sum(conf[i])))\r\n\r\n res.append(params)\r\n res.append(diagonalSum / float(confSum))\r\n res.append(classWiseAcc)\r\n\r\n if os.path.isfile(\"./accuracy.txt\"):\r\n resDict = pickle.load(open(\"accuracy.txt\", \"rb\"))\r\n\r\n resDict[key] = res\r\n pickle.dump(resDict, open(\"accuracy.txt\", \"wb\"))\r\n\r\n print(\"accuracy : \", diagonalSum/float(confSum))\r\n print(\"class-wise: \", classWiseAcc)\r\n print('debug')\r\n return diagonalSum/float(confSum), classWiseAcc","sub_path":"classifyHandFeatures.py","file_name":"classifyHandFeatures.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"394983231","text":"\"\"\"PyGotham registration information.\"\"\"\n\nfrom flask import Blueprint, g, url_for\n\nfrom pygotham.frontend import direct_to_template\n\n__all__ = ('blueprint', 'get_nav_links')\n\nblueprint = Blueprint(\n 'registration',\n __name__,\n subdomain='',\n url_prefix='/registration',\n)\n\ndirect_to_template(\n blueprint,\n '/information/',\n template='registration/information.html',\n navbar_kwargs={'path': ('Registration', 'Information')},\n)\n\n\ndef get_nav_links():\n \"\"\"Return registration-related menu items.\"\"\"\n links = {\n 'Information': url_for('registration.information'),\n }\n if g.current_event.is_registration_active:\n links['Register'] = g.current_event.registration_url\n return {'Registration': links}\n","sub_path":"pygotham/frontend/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88584679","text":"from django.core.management.base import BaseCommand, CommandError\nimport requests\nfrom pprint import pprint\nimport json\nimport os\n\nAPI_KEY = os.environ.get('AIRTABLE_KEY')\nCOLLECTIONS_URL = 'https://api.airtable.com/v0/appC7p38pnmKYOJ16/Collections?maxRecords=1000&view=Grid%20view'\nAIRTABLE_HEADERS = {'Authorization': 'Bearer ' + API_KEY}\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n print('Pull data from airtable...')\n\n response = requests.get(COLLECTIONS_URL, headers=AIRTABLE_HEADERS)\n rows = response.json().get('records')\n\n json = self.get_permission_token()\n if len(json['data']['tokenCreate']['errors']) is 
not 0:\n print('Abort importing, authentication failed')\n return\n print('New token created: {} for import user: {} \\n\\n'.format(json['data']['tokenCreate']['token'], json['data']['tokenCreate']['user']['email']))\n headers = {\n 'Authorization': 'JWT {}'.format(json['data']['tokenCreate']['token'])\n }\n\n # pprint(product_rows)\n for row in rows:\n # pprint(row.get('fields'))\n fields = row.get('fields', None)\n # breakpoint()\n if not fields or fields.get('has_been_imported_auto') or fields.get('collection_id_auto'):\n # Skip import if has_been_imported or product_is_pk is truthy\n print('No fields or row has already been imported: id={}, name={}'.format(fields.get('collection_id_auto'), fields.get('name')))\n continue # to next row\n create_input = self.prepare_input(fields=fields)\n\n results = self.send_mutation(create_input, headers)\n\n if not results:\n self.handle_error()\n continue\n\n self.update_airtable(row['id'], results)\n\n\n def prepare_input(self, fields):\n if (fields.get('name') and fields.get('publication_date') and fields.get('slug')):\n input = {\n 'isPublished': fields.get('is_published'),\n 'name': fields.get('name'),\n 'slug': fields.get('slug'),\n 'description': fields.get('description_opt', 'N/A'),\n # 'descriptionJson': fields.get('description_json'),\n 'seo': {\n 'title': fields.get('seo_title_opt'),\n 'description': fields.get('seo_description_opt'),\n },\n 'publicationDate': fields.get('publication_date'),\n # products\n }\n if fields.get('background_image_opt'):\n input['airtableImageUrl'] = fields.get('background_image_opt')[0]['url']\n else:\n input['airtableImageUrl'] = ''\n\n return input\n\n return None\n\n\n def get_permission_token(self):\n mutation = '''\n mutation TokenCreateMutation($email: String!, $password: String!) {\n tokenCreate(email: $email, password: $password) {\n token\n errors{\n field\n message\n }\n user{\n id\n email\n }\n }\n }\n '''\n input = {\"email\": \"adrienshen.dev@gmail.com\", \"password\": os.environ.get('SALEOR_PASSWORD')}\n response = requests.post('http://localhost:8000/graphql/', json={'query': mutation, \"variables\": input})\n return response.json()\n\n\n def send_mutation(self, input, headers):\n\n mutation = '''\n mutation CollectionCreateMutation($input: CollectionCreateInput!) 
{\n            collectionCreate(input: $input) {\n                errors {\n                    field\n                    message\n                }\n                collection {\n                    id\n                    name\n                }\n            }\n        }\n        '''\n\n        query = {'query': mutation, 'variables': { 'input': input }}\n        response = requests.post('http://localhost:8000/graphql/', json=query, headers=headers)\n        json = response.json()\n\n        # breakpoint()\n        print('Collection updated :: ', json['data']['collectionCreate'])\n        if json.get('data') and len(json['data']['collectionCreate']['errors']) != 0:\n            print('Import of {} failed'.format(input['name']))\n            return None\n        else:\n            return json['data']['collectionCreate']['collection']\n\n\n    def update_airtable(self, id, updated):\n        patch_payload = {\n            \"records\": [{\n                \"id\": id,\n                \"fields\": {\n                    \"has_been_imported_auto\": True,\n                    \"collection_id_auto\": updated['id']\n                }\n            }]\n        }\n        # print('airtable update >> ')\n        # pprint(d)\n\n        response = requests.patch(\n            'https://api.airtable.com/v0/appC7p38pnmKYOJ16/Collections',\n            json=patch_payload,\n            headers={'Authorization': 'Bearer ' + API_KEY}\n        )\n        data = response.json()\n        print('Airtable id updated: {} \\n'.format(data['records'][0]['id']))\n\n\n    def handle_error(self):\n        print('Implement send or log update failure somewhere')\n","sub_path":"saleor/imports/management/commands/importairtablecollection.py","file_name":"importairtablecollection.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51486397","text":"import pygame\r\nfrom random import randrange\r\n\r\ndef check_cell(cell, opponent_figures):\r\n    if (0 <= cell[0] <= 7) and (0 <= cell[1] <= 7):\r\n        if field[cell[1]][cell[0]] == ' ':\r\n            return 1\r\n        elif cell in opponent_figures:\r\n            return 2\r\n        else:\r\n            return 0\r\n    else:\r\n        return 0\r\n    \r\n\r\ndef check_pawn(cell, opponent_figures):\r\n    admissible = []\r\n    if opponent_figures == black_figures:\r\n        direction, start = 1, 1\r\n    else:\r\n        direction, start = -1, 6\r\n    if cell[1] == start:\r\n        for x,y in ([cell[0], cell[1] + 1*direction], [cell[0], cell[1] + 2*direction]):\r\n            if check_cell([x, y], opponent_figures) == 1:\r\n                admissible.append([x, y])\r\n            else:\r\n                break\r\n    else:\r\n        if check_cell((cell[0], cell[1] + 1*direction), opponent_figures) == 1:\r\n            admissible.append([cell[0], cell[1] + 1*direction])\r\n        if check_cell([cell[0] + 1, cell[1] + 1*direction], opponent_figures) == 2:\r\n            admissible.append([cell[0] + 1, cell[1] + 1*direction])\r\n        if check_cell([cell[0] - 1, cell[1] + 1*direction], opponent_figures) == 2:\r\n            admissible.append([cell[0] - 1, cell[1] + 1*direction])\r\n    return admissible\r\n\r\n\r\ndef check_king(cell, opponent_figures):\r\n    admissible = []\r\n    for x, y in ([-1,-1], [0,-1], [1,-1], [1,0], [1,1], [0,1], [-1,1], [-1,0]):\r\n        if check_cell([cell[0] + x, cell[1] + y], opponent_figures): admissible.append([cell[0] + x, cell[1] + y])\r\n    return admissible\r\n\r\n\r\ndef check_bishop(cell, opponent_figures):\r\n    admissible = []\r\n    for x,y in zip(range(cell[0]+1, 8), range(cell[1]-1, -1, -1)):\r\n        code = check_cell([x, y], opponent_figures)\r\n        if code == 1:\r\n            admissible.append([x, y])\r\n        elif code == 2:\r\n            admissible.append([x, y])\r\n            break\r\n        else:\r\n            break\r\n    for x,y in zip(range(cell[0]+1, 8), range(cell[1]+1, 8)):\r\n        code = check_cell([x, y], opponent_figures)\r\n        if code == 1:\r\n            admissible.append([x, y])\r\n        elif code == 2:\r\n            admissible.append([x, y])\r\n            break\r\n        else:\r\n            break\r\n    for x,y in zip(range(cell[0]-1, -1, -1), range(cell[1]+1,8)):\r\n        code = check_cell([x, y], 
opponent_figures)\r\n if code == 1:\r\n admissible.append([x, y])\r\n elif code == 2:\r\n admissible.append([x, y])\r\n break\r\n else:\r\n break\r\n for x,y in zip(range(cell[0]-1, -1, -1), range(cell[1]-1, -1, -1)):\r\n code = check_cell([x, y], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, y])\r\n elif code == 2:\r\n admissible.append([x, y])\r\n break\r\n else:\r\n break\r\n return admissible\r\n\r\n\r\ndef check_knight(cell, opponent_figures):\r\n admissible = []\r\n for x,y in ([1,-2], [2,-1], [2,1], [1,2], [-1,2], [-2,1], [-2,-1], [-1,-2]):\r\n if check_cell([cell[0] + x, cell[1] + y], opponent_figures):\r\n admissible.append([cell[0] + x, cell[1] + y])\r\n return admissible\r\n\r\n\r\ndef check_rook(cell, opponent_figures):\r\n admissible = []\r\n for x in range(cell[0]+1, 8):\r\n code = check_cell([x, cell[1]], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, cell[1]])\r\n elif code == 2:\r\n admissible.append([x, cell[1]])\r\n break\r\n else:\r\n break\r\n for x in range(cell[0]-1, -1, -1):\r\n code = check_cell([x, cell[1]], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, cell[1]])\r\n elif code == 2:\r\n admissible.append([x, cell[1]])\r\n break\r\n else:\r\n break\r\n for y in range(cell[1]+1, 8):\r\n code = check_cell([cell[0], y], opponent_figures)\r\n if code == 1:\r\n admissible.append([cell[0], y])\r\n elif code == 2:\r\n admissible.append([cell[0], y])\r\n break\r\n else:\r\n break\r\n for y in range(cell[1]-1, -1, -1):\r\n code = check_cell([cell[0], y], opponent_figures)\r\n if code == 1:\r\n admissible.append([cell[0], y])\r\n elif code == 2:\r\n admissible.append([cell[0], y])\r\n break\r\n else:\r\n break \r\n return admissible\r\n\r\n\r\ndef check_queen(cell, opponent_figures):\r\n admissible = []\r\n for x,y in zip(range(cell[0]+1, 8), range(cell[1]-1, -1, -1)):\r\n code = check_cell([x, y], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, y])\r\n elif code == 2:\r\n admissible.append([x, y])\r\n break\r\n else:\r\n break\r\n for x,y in zip(range(cell[0]+1, 8), range(cell[1]+1, 8)):\r\n code = check_cell([x, y], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, y])\r\n elif code == 2:\r\n admissible.append([x, y])\r\n break\r\n else:\r\n break\r\n for x,y in zip(range(cell[0]-1, -1, -1), range(cell[1]+1,8)):\r\n code = check_cell([x, y], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, y])\r\n elif code == 2:\r\n admissible.append([x, y])\r\n break\r\n else:\r\n break\r\n for x,y in zip(range(cell[0]-1, -1, -1), range(cell[1]-1, -1, -1)):\r\n code = check_cell([x, y], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, y])\r\n elif code == 2:\r\n admissible.append([x, y])\r\n break\r\n else:\r\n break\r\n for x in range(cell[0]+1, 8):\r\n code = check_cell([x, cell[1]], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, cell[1]])\r\n elif code == 2:\r\n admissible.append([x, cell[1]])\r\n break\r\n else:\r\n break\r\n for x in range(cell[0]-1, -1, -1):\r\n code = check_cell([x, cell[1]], opponent_figures)\r\n if code == 1:\r\n admissible.append([x, cell[1]])\r\n elif code == 2:\r\n admissible.append([x, cell[1]])\r\n break\r\n else:\r\n break\r\n for y in range(cell[1]+1, 8):\r\n code = check_cell([cell[0], y], opponent_figures)\r\n if code == 1:\r\n admissible.append([cell[0], y])\r\n elif code == 2:\r\n admissible.append([cell[0], y])\r\n break\r\n else:\r\n break\r\n for y in range(cell[1]-1, -1, -1):\r\n code = check_cell([cell[0], y], opponent_figures)\r\n if 
code == 1:\r\n admissible.append([cell[0], y])\r\n elif code == 2:\r\n admissible.append([cell[0], y])\r\n break\r\n else:\r\n break\r\n return admissible\r\n\r\n\r\ndef check_positions(cell, player_figures, opponent_figures):\r\n # cell = field[y][x] -> 'f','p'...\r\n admissible = []\r\n figure = field[cell[1]][cell[0]]\r\n if figure == 'p':\r\n admissible = check_pawn(cell, opponent_figures)\r\n elif figure == 'r':\r\n admissible = check_rook(cell, opponent_figures)\r\n elif figure == 'h':\r\n admissible = check_knight(cell, opponent_figures)\r\n elif figure == 'b':\r\n admissible = check_bishop(cell, opponent_figures)\r\n elif figure == 'q':\r\n admissible = check_queen(cell, opponent_figures)\r\n elif figure == 'k':\r\n admissible = check_king(cell, opponent_figures)\r\n return admissible\r\n \r\n\r\ndef draw_figures2():\r\n for figure in black_figures:\r\n x = figure[0] * CELL_SIZE + FIGURE_START_POS[0]\r\n y = figure[1] * CELL_SIZE + FIGURE_START_POS[1]\r\n pygame.draw.rect(screen,(0,0,0),(x, y, FIGURE_SIZE, FIGURE_SIZE), 0)\r\n figure_letter = text_font.render(field[figure[1]][figure[0]],0, COLOR_BACKGROUND)\r\n screen.blit(figure_letter, (x+MARGIN, y+MARGIN)) \r\n for figure in white_figures:\r\n x = figure[0] * CELL_SIZE + FIGURE_START_POS[0]\r\n y = figure[1]*CELL_SIZE + FIGURE_START_POS[1]\r\n pygame.draw.rect(screen,(255,255,255),(x, y, FIGURE_SIZE, FIGURE_SIZE), 0)\r\n pygame.draw.rect(screen,(0,0,0),(x, y, FIGURE_SIZE, FIGURE_SIZE), 1)\r\n figure_letter = text_font.render(field[figure[1]][figure[0]],0, COLOR_TEXT)\r\n screen.blit(figure_letter, (x+MARGIN, y+MARGIN))\r\n\r\n \r\ndef draw_figures():\r\n for figure in black_figures:\r\n x = figure[0] * CELL_SIZE + FIGURE_START_POS[0]\r\n y = figure[1] * CELL_SIZE + FIGURE_START_POS[1]\r\n if black_pictures[field[figure[1]][figure[0]]]:\r\n screen.blit(black_pictures[field[figure[1]][figure[0]]], (x,y)) \r\n for figure in white_figures:\r\n x = figure[0] * CELL_SIZE + FIGURE_START_POS[0]\r\n y = figure[1] * CELL_SIZE + FIGURE_START_POS[1]\r\n if black_pictures[field[figure[1]][figure[0]]]:\r\n screen.blit(white_pictures[field[figure[1]][figure[0]]], (x,y))\r\n\r\n\r\ndef take_figure(player_figures, opponent_figures):\r\n global selected, selected_figure, selected_cell, white_step, black_step\r\n opponent_figures.remove(selected_cell) # Удаляем фигуру у противника\r\n field[selected_cell[1]][selected_cell[0]] = field[selected_figure[1]][selected_figure[0]] # Перемещаем свою фигуру на место удаленной\r\n field[selected_figure[1]][selected_figure[0]] = ' ' # Очищаем клетку перемещенной фигуры\r\n player_figures[player_figures.index(selected_figure)] = selected_cell[:] # Меняем координаты выбранной фигуры на координаты выбранной ячейки\r\n white_step, black_step = black_step, white_step\r\n selected,selected_figure,selected_cell = False, None, None\r\n\r\n\r\ndef move_figure(player_figures):\r\n global selected, selected_figure, selected_cell, white_step, black_step\r\n field[selected_cell[1]][selected_cell[0]] = field[selected_figure[1]][selected_figure[0]] # Перемещаем фигуру на новое место\r\n field[selected_figure[1]][selected_figure[0]] = ' ' # Очищаем клетку перемещенной фигуры\r\n player_figures[player_figures.index(selected_figure)] = selected_cell[:]\r\n white_step, black_step = black_step, white_step\r\n selected,selected_figure,selected_cell = False, None, None\r\n\r\n\r\ndef init_pictures():\r\n\r\n def init_picture(file,colorkey = (0,255,0)):\r\n pic = 
pygame.transform.scale(pygame.image.load(file).convert(),(FIGURE_SIZE, FIGURE_SIZE))\r\n pic.set_colorkey(colorkey)\r\n return pic\r\n\r\n \r\n for black in ('p','r','h','b','q','k'):\r\n if black == 'p':\r\n black_pictures[black] = init_picture('pictures/pawn_black.png')\r\n elif black == 'r':\r\n black_pictures[black] = init_picture('pictures/rook_black.png')\r\n elif black == 'h':\r\n black_pictures[black] = init_picture('pictures/knight_black.png')\r\n elif black == 'b':\r\n black_pictures[black] = init_picture('pictures/bishop_black.png')\r\n elif black == 'q':\r\n black_pictures[black] = init_picture('pictures/queen_black.png')\r\n elif black == 'k':\r\n black_pictures[black] = init_picture('pictures/king_black.png')\r\n for white in ('p','r','h','b','q','k'):\r\n if white == 'p':\r\n white_pictures[white] = init_picture('pictures/pawn_white.png')\r\n elif white == 'r':\r\n white_pictures[white] = init_picture('pictures/rook_white.png')\r\n elif white == 'h':\r\n white_pictures[white] = init_picture('pictures/knight_white.png')\r\n elif white == 'b':\r\n white_pictures[white] = init_picture('pictures/bishop_white.png')\r\n elif white == 'q':\r\n white_pictures[white] = init_picture('pictures/queen_white.png')\r\n elif white == 'k':\r\n white_pictures[white] = init_picture('pictures/king_white.png')\r\n \r\n\r\ndef init_field_screen():\r\n grid = 0\r\n grid_colors = ((200,200,200),(50,50,50))\r\n for x in range(8):\r\n for y in range(8):\r\n grid +=1\r\n pygame.draw.rect(field_screen, grid_colors[grid%2],(CELL_SIZE*x,CELL_SIZE*y, CELL_SIZE, CELL_SIZE),0)\r\n grid +=1\r\n pygame.draw.rect(field_screen,(0,0,0), (0,0,FIELD_LENGTH,FIELD_LENGTH), 1)\r\n\r\n \r\nSIZE_X, SIZE_Y = 640, 480\r\nFPS = 75\r\nCOLOR_BACKGROUND = 255, 255, 255\r\nCOLOR_TEXT = 0, 0, 0\r\nCOLOR_YELLOW = 220, 220, 0\r\nCOLOR_RED = 220, 0, 0\r\nCOLOR_GREEN = 0, 220, 0\r\nCELL_SIZE = 40\r\nFIGURE_SIZE = 30\r\nFIELD_START_POS = 5, 5\r\nFIELD_LENGTH = 400\r\nCELL_SIZE = int(FIELD_LENGTH/8)\r\nMARGIN = 5 # Расстояние откраев ячейки до фигуры\r\nFIGURE_SIZE = CELL_SIZE - MARGIN*2\r\nFIGURE_START_POS = [FIELD_START_POS[0] + MARGIN, FIELD_START_POS[1] + MARGIN]\r\n\r\nfield = [\r\n ['r','h','b','q','k','b','h','r'],\r\n ['p','p','p','p','p','p','p','p'],\r\n [' ',' ',' ',' ',' ',' ',' ',' '],\r\n [' ',' ',' ',' ',' ',' ',' ',' '],\r\n [' ',' ',' ',' ',' ',' ',' ',' '],\r\n [' ',' ',' ',' ',' ',' ',' ',' '],\r\n ['p','p','p','p','p','p','p','p'],\r\n ['r','h','b','q','k','b','h','r'],\r\n ]\r\n\r\nblack_figures = [[j,i] for j in range(8) for i in range(6,8)]\r\nwhite_figures = [[j,i] for j in range(8) for i in range(2)]\r\nadmissible_positions = []\r\nselected_cell = None\r\nselected_figure = None\r\ngame = True\r\nselected = False\r\nblack_step = False\r\nwhite_step = True\r\nwhite_check = False\r\nblack_check = False\r\npygame.init()\r\npygame.display.set_caption('CHESS')\r\nscreen = pygame.display.set_mode((SIZE_X, SIZE_Y)) # pygame.DOUBLEBUF\r\nfield_screen = pygame.Surface((FIELD_LENGTH, FIELD_LENGTH))\r\ninit_field_screen()\r\nclock = pygame.time.Clock()\r\nblack_pictures = {}\r\nwhite_pictures = {}\r\ninit_pictures()\r\ntext_font = pygame.font.SysFont('arial', 24)\r\nwhile game:\r\n clock.tick(FPS)\r\n screen.fill(COLOR_BACKGROUND)\r\n screen.blit(field_screen, FIELD_START_POS)\r\n if selected:\r\n for cell in admissible_positions:\r\n if selected_figure in white_figures and cell in black_figures:\r\n pygame.draw.rect(screen, COLOR_RED, (cell[0]*CELL_SIZE + FIELD_START_POS[0], cell[1]*CELL_SIZE + FIELD_START_POS[1], 
while game:\r\n    clock.tick(FPS)\r\n    screen.fill(COLOR_BACKGROUND)\r\n    screen.blit(field_screen, FIELD_START_POS)\r\n    if selected:\r\n        for cell in admissible_positions:\r\n            if selected_figure in white_figures and cell in black_figures:\r\n                pygame.draw.rect(screen, COLOR_RED, (cell[0]*CELL_SIZE + FIELD_START_POS[0], cell[1]*CELL_SIZE + FIELD_START_POS[1], CELL_SIZE, CELL_SIZE), 3)\r\n            elif selected_figure in black_figures and cell in white_figures:\r\n                pygame.draw.rect(screen, COLOR_RED, (cell[0]*CELL_SIZE + FIELD_START_POS[0], cell[1]*CELL_SIZE + FIELD_START_POS[1], CELL_SIZE, CELL_SIZE), 3)\r\n            else:\r\n                pygame.draw.rect(screen, COLOR_YELLOW, (cell[0]*CELL_SIZE + FIELD_START_POS[0], cell[1]*CELL_SIZE + FIELD_START_POS[1], CELL_SIZE, CELL_SIZE), 3)\r\n        pygame.draw.rect(screen, COLOR_GREEN, (selected_cell[0]*CELL_SIZE + FIELD_START_POS[0], selected_cell[1]*CELL_SIZE + FIELD_START_POS[1], CELL_SIZE, CELL_SIZE), 3)\r\n    for event in pygame.event.get():\r\n\r\n        if event.type == pygame.QUIT:\r\n            game = False\r\n        elif event.type == pygame.KEYDOWN:\r\n            pass\r\n        elif event.type == pygame.MOUSEBUTTONDOWN:\r\n            if event.button == 1:\r\n                x = (pygame.mouse.get_pos()[0] - FIELD_START_POS[0]) // CELL_SIZE\r\n                y = (pygame.mouse.get_pos()[1] - FIELD_START_POS[1]) // CELL_SIZE\r\n                selected_cell = [x, y]\r\n                if selected_cell in white_figures and white_step:\r\n                    selected_figure = selected_cell[:]\r\n                    admissible_positions = check_positions(selected_figure[:], white_figures, black_figures)\r\n                    selected = True\r\n                elif selected_cell in black_figures and black_step:\r\n                    selected_figure = selected_cell\r\n                    admissible_positions = check_positions(selected_figure[:], black_figures, white_figures)\r\n                    selected = True\r\n                else:\r\n                    if selected_figure is not None and white_step:  # A figure is already selected and a destination cell was clicked\r\n                        if selected_cell in black_figures and selected_cell in admissible_positions:\r\n                            take_figure(white_figures, black_figures)\r\n                        elif selected_cell in admissible_positions:\r\n                            move_figure(white_figures)\r\n                        else:\r\n                            selected = False\r\n                            selected_figure = None\r\n                            selected_cell = None\r\n                    elif selected_figure is not None and black_step:\r\n                        if selected_cell in white_figures and selected_cell in admissible_positions:\r\n                            take_figure(black_figures, white_figures)\r\n                        elif selected_cell in admissible_positions:\r\n                            move_figure(black_figures)\r\n                        else:\r\n                            selected = False\r\n                            selected_figure = None\r\n                            selected_cell = None\r\n            elif event.button == 3:\r\n                # Right click deselects your own figure. Convert the mouse position to grid\r\n                # coordinates, like the left-click branch; the original compared pixel\r\n                # coordinates against grid coordinates, so this branch never matched.\r\n                x = (pygame.mouse.get_pos()[0] - FIELD_START_POS[0]) // CELL_SIZE\r\n                y = (pygame.mouse.get_pos()[1] - FIELD_START_POS[1]) // CELL_SIZE\r\n                if [x,y] in white_figures and white_step:\r\n                    selected = False\r\n                    selected_figure = None\r\n                    selected_cell = None\r\n                elif [x,y] in black_figures and black_step:\r\n                    selected = False\r\n                    selected_figure = None\r\n                    selected_cell = None\r\n\r\n    step_white = text_font.render('white step: ' + str(white_step), 0, COLOR_TEXT)\r\n    step_black = text_font.render('black step: ' + str(black_step), 0, COLOR_TEXT)\r\n    check_white = text_font.render('white check: ' + str(white_check), 0, COLOR_TEXT)\r\n    check_black = text_font.render('black check: ' + str(black_check), 0, COLOR_TEXT)\r\n    fps = text_font.render('fps: ' + str(int(clock.get_fps())), 0, COLOR_TEXT)\r\n    screen.blit(step_white,(410, 20))\r\n    screen.blit(step_black,(410, 50))\r\n    screen.blit(check_white,(410, 100))\r\n    screen.blit(check_black, (410, 130))\r\n    screen.blit(fps,(410, 180))\r\n    draw_figures()\r\n    pygame.display.flip()\r\npygame.quit()\r\n\r\n# print(pygame.mouse.get_pos())  # mouse position\r\n# print(event.buttons[0])  # whether the left mouse button is pressed\r\n\r\n# pygame.MOUSEMOTION - event.buttons = (0,0,0)\r\n# pygame.MOUSEBUTTONDOWN - event.button = 1,2...5\r\n\r\n# text_font = pygame.font.Font(FONT_FILE, 24)\r\n# score_text = text_font.render('score:{: >3}'.format(int(score)),0,TEXT_COLOR)\r\n# screen.blit(score_text, (tetris_len_x+10,240))\r\n
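# --- Editor's sketch (hypothetical helper, not in the original file): both mouse
# branches above convert a pixel position into a board cell; factoring the
# conversion out would avoid the pixel/grid mix-up fixed in the right-click branch.
def mouse_to_cell(pos):
    # pos is a (pixel_x, pixel_y) pair as returned by pygame.mouse.get_pos()
    return [(pos[0] - FIELD_START_POS[0]) // CELL_SIZE,
            (pos[1] - FIELD_START_POS[1]) // CELL_SIZE]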
","sub_path":"chess_game0.5.2.py","file_name":"chess_game0.5.2.py","file_ext":"py","file_size_in_byte":18729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"597272411","text":"from ckeditor_uploader.fields import RichTextUploadingField\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom mptt.models import MPTTModel, TreeForeignKey\n# from django.forms import ModelForm\nfrom django.forms import ModelForm, TextInput, Textarea\nfrom django.db.models import Avg, Count\nfrom django.db.models.signals import post_save\nfrom django.utils.safestring import mark_safe\n# from mptt.fields import TreeForeignKey\n# from mptt.models import MPTTModel\n\nfrom django.dispatch import receiver\nfrom django_countries.fields import CountryField\nfrom django_countries.widgets import CountrySelectWidget\nfrom taggit.managers import TaggableManager\n\n\nclass Setting(models.Model):\n    # STATUS = (\n    #     ('True', 'True'),\n    #     ('False', 'False'),\n    # )\n    title = models.CharField(max_length=150)\n    keywords = models.CharField(max_length=255)\n    description = models.CharField(max_length=255)\n    company = models.CharField(max_length=50)\n    address = RichTextUploadingField(blank=True)\n    phone = models.CharField(blank=True, max_length=15)\n    email = models.CharField(blank=True, max_length=50)\n    icon = models.ImageField(blank=True, upload_to='images/')\n    facebook = models.CharField(blank=True, max_length=50)\n    instagram = models.CharField(blank=True, max_length=50)\n    twitter = models.CharField(blank=True, max_length=50)\n    youtube = models.CharField(blank=True, max_length=50)\n    aboutus = RichTextUploadingField(null=True, blank=True)\n    contact = RichTextUploadingField(null=True, blank=True)\n    donate = RichTextUploadingField(null=True, blank=True)\n    what_we_do = models.TextField(null=True, blank=True)\n    our_mission = models.TextField(null=True, blank=True)\n    our_vision = models.TextField(null=True, blank=True)\n    references = models.TextField(null=True, blank=True)\n    privacypolicy = RichTextUploadingField(null=True, blank=True)\n    tandc = RichTextUploadingField(null=True, blank=True)\n    # status = models.CharField(max_length=10, choices=STATUS)\n\n    def __str__(self):\n        return self.title\n\n\nclass ContactMessage(models.Model):\n\n    email = models.CharField(blank=True, max_length=50)\n    phone = models.IntegerField(blank=True, null=True)  # null=True added: a blank IntegerField needs NULL at the database level\n    description = models.TextField(blank=True, max_length=255)\n    ip = models.CharField(blank=True, max_length=100)\n    note = models.CharField(blank=True, max_length=100)\n    # date = models.DateTimeField()\n    date = models.DateTimeField(auto_now_add=True)\n\n    def __str__(self):\n        return str(self.phone)  # __str__ must return a string; phone is an integer\n\n\nclass ContactForm(ModelForm):\n    class Meta:\n        model = ContactMessage\n        fields = ['phone', 'email', 'description']\n        # widgets = {\n        #     'name': TextInput(attrs={'class': 'input col-lg-6 col-md-6 col-sm-6', 'placeholder': 'Name & Surname'}),\n        #     'subject': TextInput(attrs={'class': 'input col-lg-6 col-md-6 col-sm-6', 'placeholder': 'Subject'}),\n        #     'phone': TextInput(attrs={'class': 'input col-lg-6 col-md-6 col-sm-6', 'placeholder': 'phone'}),\n        #     'email': TextInput(attrs={'class': 'input col-lg-6 col-md-6 col-sm-6', 'placeholder': 'Email Address'}),\n        #     'message': Textarea(attrs={'class': 'input col-12', 'placeholder': 'Your Message', 'rows': '5'}),\n        # }\n\n\n
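# --- Editor's sketch (hypothetical view, not part of this models.py and normally
# placed in views.py): one common way to drive the ContactForm above from a
# Django view; the 'contact.html' template name is an assumption.
from django.shortcuts import render, redirect

def contact_view(request):
    form = ContactForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()  # persists a ContactMessage row
        return redirect('/')
    return render(request, 'contact.html', {'form': form})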
class Faqs(models.Model):\n\n    question = models.CharField(max_length=200)\n    answer = RichTextUploadingField()\n\n    def __str__(self):\n        return self.question\n\n\nclass Subscribe(models.Model):\n    email = models.CharField(blank=True, max_length=100)\n\n    def __str__(self):\n        return self.email\n\n\nclass SubscribeForm(ModelForm):\n    class Meta:\n        model = Subscribe\n        fields = ['email']\n        widgets = {\n            'email': TextInput(attrs={'class': 'email-box', 'placeholder': 'Email Address'}),\n        }\n\n\nclass Gallery(models.Model):\n    CHOICE = (\n        ('Tattoo', 'Tattoo'),\n        ('Haircut', 'Haircut'),\n        ('Event', 'Event'),\n        ('Others', 'Others'),\n    )\n    title = models.CharField(max_length=150)\n    image = models.ImageField(blank=True, upload_to='images/')\n    choice = models.CharField(max_length=10, choices=CHOICE)\n\n    def __str__(self):\n        return self.title\n\n\nclass Portfolio(models.Model):\n    title = models.CharField(max_length=100)\n    project_type = models.CharField(max_length=20, blank=True)\n    host = models.CharField(max_length=30, blank=True)\n    description = RichTextUploadingField(blank=True)\n    detail = RichTextUploadingField(blank=True)\n    slug = models.SlugField(null=False, unique=True)\n    image = models.ImageField(blank=True, upload_to='app/images/')\n    date = models.DateTimeField()\n\n    def __str__(self):\n        return self.title\n\n    def image_tag(self):\n        # NOTE: the <img ...> markup was stripped from the source; restored here\n        return mark_safe('<img src=\"{}\" height=\"50\"/>'.format(self.image.url))\n\n    image_tag.short_description = 'image'\n\n\nclass PortfolioImages(models.Model):\n    portfolio = models.ForeignKey(Portfolio, on_delete=models.CASCADE)\n\n    image = models.ImageField(blank=True, upload_to='app/images/')\n\n\nclass VideoCategory(MPTTModel):\n    parent = TreeForeignKey(\n        'self', blank=True, null=True, related_name='children', on_delete=models.CASCADE)\n    title = models.CharField(max_length=100)\n    slug = models.SlugField(null=False, unique=True)\n\n    class MPTTMeta:\n        order_insertion_by = ['title']\n\n    def get_absolute_url(self):\n        return reverse('category_detail', kwargs={'slug': self.slug})\n\n    def __str__(self):  # show the full category path (a duplicate plain __str__ was removed; on Python 2 this would be __unicode__)\n        full_path = [self.title]\n        k = self.parent\n        while k is not None:\n            full_path.append(k.title)\n            k = k.parent\n        return ' / '.join(full_path[::-1])\n\n\nclass Videos(models.Model):\n    category = models.ForeignKey(VideoCategory, on_delete=models.CASCADE)\n    title = models.CharField(max_length=100)\n    link = models.CharField(max_length=255, blank=True, null=True)\n    image = models.ImageField(upload_to='app/videos/', blank=True)\n    podcast = models.BooleanField(default=False)\n    slug = models.SlugField(null=False, unique=True)\n\n    def __str__(self):\n        return self.title\n\n    def image_tag(self):\n        # NOTE: the <img ...> markup was stripped from the source; restored here\n        return mark_safe('<img src=\"{}\" height=\"50\"/>'.format(self.image.url))\n\n    image_tag.short_description = 'image'\n\n\nclass Audios(models.Model):\n    title = models.CharField(max_length=100)\n    link = models.CharField(max_length=255)\n    image = models.ImageField(upload_to='app/videos/', blank=True)\n    slug = models.SlugField(null=False, unique=True)\n\n    def __str__(self):\n        return self.title\n\n\nclass Breadcrumb(models.Model):\n    home = models.ImageField(blank=True, upload_to='images/breadcrumb/')\n    news = models.ImageField(blank=True, upload_to='images/breadcrumb/')\n    news_detail = models.ImageField(blank=True, upload_to='images/breadcrumb/')\n    audio = models.ImageField(blank=True, upload_to='images/breadcrumb/')\n    video = models.ImageField(blank=True, upload_to='images/breadcrumb/')\n    store = models.ImageField(blank=True, upload_to='images/breadcrumb/')\n\n    def __str__(self):\n        return 'Breadcrumb Adverts'\n\n\nclass Nextmatch(models.Model):\n    match = models.CharField(max_length=100)\n    code = models.CharField(max_length=100)\n\n    def __str__(self):\n        return self.match\n\n\nclass Advert(models.Model):\n    title = models.CharField(max_length=100)\n    advert = models.ImageField(blank=True, upload_to='images/breadcrumb/')\n    link = models.CharField(max_length=255, blank=True, null=True)\n    home = models.BooleanField(default=False)\n    news = models.BooleanField(default=False)\n    store = models.BooleanField(default=False)\n    audio = models.BooleanField(default=False)\n    video = models.BooleanField(default=False)\n    account = models.BooleanField(default=False)\n\n    def __str__(self):\n        return self.title\n","sub_path":"home/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"346470446","text":"import os,json,base64\nimport graphviz\nimport uuid\n\nfrom flask_bootstrap import Bootstrap\nfrom flask import Flask, request, session, redirect, url_for, flash, render_template\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import SelectField, IntegerField, StringField, SubmitField\nfrom wtforms.validators import DataRequired\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n# main app\napp = Flask(__name__)\n# provide a secret key used to stop CSRF (cross site request forgery) attacks.\n# this secret key can/should be stored in an environment variable, but for now:\napp.config['SECRET_KEY'] = 'ec1785e9-8736-46d5-b478-ac836cf71a0c'\n\nbootstrap = Bootstrap(app)\n\ndef rebuild_graph():\n    global node_list\n    global edge_list\n\n    node_list = ['alpha0', 'alpha1', 'alpha2',\n                 'beta0', 'beta1', 'beta2', 'beta3',\n                 'gamma0']\n    edge_list = [\n        ('alpha0', 'beta0'), ('alpha0', 'beta1'), ('alpha0', 'beta2'),\n        ('alpha1', 'beta0'), ('alpha1', 'beta1'), ('alpha1', 'beta3'),\n        ('alpha2', 'beta2'),\n        ('beta0', 'gamma0'), ('beta1','gamma0'), ('beta2','gamma0')]\n\ndef build_graph(node_list, edge_list):\n    g = 
graphviz.Digraph(format='svg')\n g.attr('node', shape='box')\n g.attr(rankdir='LR', size='8,5')\n for x in node_list:\n g.node(x)\n for x in edge_list:\n g.edge(x[0], x[1])\n return g\n\ndef build_svg(g):\n image_file = g.pipe()\n # scrape the crap off the piped image\n image_file = str(image_file).replace(\"\\\\n\",\"\")\n image_file = str(image_file).replace(\"b'\",\"\")\n image_file = str(image_file).replace(\"'\",\"\")\n return str(image_file)\n\n \n@app.route('/', methods=['GET', 'POST'])\ndef index():\n rebuild_graph()\n g = build_graph(node_list, edge_list)\n return render_template('graphviz_test_v2.html', image_file=build_svg(g))\n\n@app.route('/test_ajax', methods=['POST'])\ndef ajax_test():\n global node_list, edge_list\n\n node = request.json['node_id']\n action = request.json['action']\n name = request.json['name']\n new_nodes = []\n new_edges = []\n if action=='Remove':\n for x in node_list:\n if x != name:\n new_nodes.append(x)\n for x in edge_list:\n if not (x[0]==name or x[1]==name):\n new_edges.append(x)\n\n node_list = new_nodes\n edge_list = new_edges\n else:\n new_nodes = node_list\n new_edges = edge_list\n\n g = build_graph(new_nodes, new_edges)\n return json.dumps({'result': node+' '+action + ' '+name, 'map':build_svg(g)})\n\n","sub_path":"graphviz_test_v2.py","file_name":"graphviz_test_v2.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"220259527","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nimport numpy as np\n# from numba import jit,autojit,njit\n# from numbapro import jit,njit\nfrom parakeet import jit as par_jit\nfrom numbapro import double, long_, int_, jit, autojit\nfrom MDAnalysis import *\n# import matplotlib.pyplot as plt # side-stepping mpl's backend\n# import os\nimport readparameters\n# import objgraph\n\ndef get_angles(u):\n \"\"\"\n given universe return chords of the (i+1-i-1) atoms(central ith atom) and angles\n input: universe\n output: atoms(coordinates), angles(angle_values)\n \"\"\"\n angles = u.angles\n angle_values = angles.angles()\n atoms = angles.atom2.positions\n # atoms = np.ascontiguousarray(atoms, dtype=np.float32), \n # angle_values = np.ascontiguousarray(angle_values, dtype=np.float32)\n atoms = np.asarray(atoms,dtype=np.float32)\n angle_values = np.asarray(angle_values,dtype=np.float32)\n return atoms, angle_values\n\n@jit('float32(float32,float32)',nopython=True)\ndef numbatrunc(a,l):\n \"\"\"\n get wrapped coordinates\n and get the nearest image distance\n \"\"\"\n a = abs(a)-l*int(abs(a)/l)\n if (a > l/2.0):\n return l - a\n else:\n return a\n\n\n# Nangles = angles.shape[0]\n# assert Natoms==Nangles, \" number of atoms %d should equal nuber of anngles %d \" % (Natoms,Nangles)\n\n# @jit('float32(float32[:,:],float32[:])',nopython=True, locals={'i':long_,'j':long_,'cos2':double,'d':double,'tmp':double, 'dPhi':double, 'dist_around':double,'n':long_, 'Natoms':long_, 'M':long_, 'k':long_})\n@jit('float32(float32[:,::1],float32[:],float32)',nopython=True)\ndef Calcg2(atoms, angles,L):\n \"\"\"\n calculate g2 parameter\n input: atoms Natoms*3 array, angles Natoms*1 array of angles\n here Natoms is not number of atoms in the system, is number of angle vertices\n \"\"\"\n # d local distance parameter , tmp needed to calculate d\n # dPhi - local angle difference, dist_around = cutoff\n # cos2 - local g2 parameter , n - counter of neighbours\n # g2 - the result of our program \n Natoms = atoms.shape[0]\n M = 
atoms.shape[1]\n cos2 = 0.0; g2 = 0.0; d = 0.0; tmp = 0.0; dPhi = 0.0;dist_around = 8.0 ; n = 0\n for i in range(Natoms):\n n = 0\n cos2 = 0.0\n for j in range(Natoms):\n if i!=j:\n d = 0.0\n tmp = 0.0\n for k in range(M):\n tmp = atoms[i,k] - atoms[j,k]\n tmp = numbatrunc(tmp,L)\n d += tmp*tmp\n d = np.sqrt(d)\n if d= other.\"\"\"\n mros = []\n for sub, sup in zip(self, other):\n i = min(mro(sub, sup))\n mros.append(i)\n return mros\n\n\nclass multimethod(dict):\n \"\"\"A callable directed acyclic graph of methods.\"\"\"\n\n def __new__(cls, func, strict=False):\n namespace = inspect.currentframe().f_back.f_locals\n self = functools.update_wrapper(dict.__new__(cls), func)\n self.strict, self.pending = bool(strict), set()\n return namespace.get(func.__name__, self)\n\n def __init__(self, func, strict=False):\n try:\n self[get_types(func)] = func\n except NameError:\n self.pending.add(func)\n\n def register(self, func):\n \"\"\"Decorator for registering function.\"\"\"\n self.__init__(func)\n return self if self.__name__ == func.__name__ else func\n\n def __get__(self, instance, owner):\n return self if instance is None else types.MethodType(self, instance)\n\n def parents(self, types):\n \"\"\"Find immediate parents of potential key.\"\"\"\n parents = {key for key in self if isinstance(key, signature) and key < types}\n return parents - {ancestor for parent in parents for ancestor in parent.parents}\n\n def clean(self):\n \"\"\"Empty the cache.\"\"\"\n for key in list(self):\n if not isinstance(key, signature):\n dict.__delitem__(self, key)\n\n def __setitem__(self, types, func):\n self.clean()\n types = signature(types)\n parents = types.parents = self.parents(types)\n for key in self:\n if types < key and (not parents or parents & key.parents):\n key.parents -= parents\n key.parents.add(types)\n dict.__setitem__(self, types, func)\n\n def __delitem__(self, types):\n self.clean()\n dict.__delitem__(self, types)\n for key in self:\n if types in key.parents:\n key.parents = self.parents(key)\n\n def __missing__(self, types):\n \"\"\"Find and cache the next applicable method of given types.\"\"\"\n self.evaluate()\n if types in self:\n return self[types]\n keys = self.parents(types)\n\n if len(keys) == 1 if self.strict else keys:\n return self.setdefault(types, self[min(keys, key=signature(types).__sub__)])\n raise DispatchError(\"{}{}: {} methods found\".format(self.__name__, types, len(keys)))\n\n def __call__(self, *args, **kwargs):\n \"\"\"Resolve and dispatch to best method.\"\"\"\n types = tuple(Type[x] if isinstance(x, type) else type(x) for x in args)\n fn = self[types]\n return fn(*args, ** kwargs)\n\n def evaluate(self):\n \"\"\"Evaluate any pending forward references.\n\n It is recommended to call this explicitly when using forward references,\n otherwise cache misses will be forced to evaluate.\n \"\"\"\n while self.pending:\n func = self.pending.pop()\n self[get_types(func)] = func\n\n\nclass multidispatch(multimethod):\n def register(self, *types):\n \"\"\"Return a decorator for registering in the style of `functools.singledispatch`.\"\"\"\n return lambda func: self.__setitem__(types, func) or func\n\n\ndef isa(*types):\n \"\"\"Partially bound `isinstance`.\"\"\"\n return lambda arg: isinstance(arg, types)\n\n\nclass overload(collections.OrderedDict):\n \"\"\"Ordered functions which dispatch based on their annotated predicates.\"\"\"\n\n __get__ = multimethod.__get__\n register = multimethod.register\n\n def __new__(cls, func):\n namespace = 
inspect.currentframe().f_back.f_locals\n self = functools.update_wrapper(super().__new__(cls), func)\n return namespace.get(func.__name__, self)\n\n def __init__(self, func):\n self[inspect.signature(func)] = func\n\n def __call__(self, *args, **kwargs):\n \"\"\"Dispatch to first matching function.\"\"\"\n for sig, func in reversed(self.items()):\n arguments = sig.bind(*args, **kwargs).arguments\n if all(predicate(arguments[name]) for name, predicate in func.__annotations__.items()):\n return func(*args, **kwargs)\n raise DispatchError(\"No matching functions found\")\n\n\nclass multimeta(type):\n \"\"\"Convert all callables in namespace to multimethods\"\"\"\n\n class multidict(dict):\n def __setitem__(self, key, value):\n curr = self.get(key, None)\n\n if callable(value):\n if callable(curr) and hasattr(curr, 'register'):\n value = curr.register(value)\n else:\n value = multimethod(value)\n\n dict.__setitem__(self, key, value)\n\n @classmethod\n def __prepare__(mcs, name, bases):\n return mcs.multidict()\n","sub_path":"multimethod.py","file_name":"multimethod.py","file_ext":"py","file_size_in_byte":6474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"259968778","text":"## TRAIN\nif phase_train is not None:\n #DO BN\n feed_dict_train = {x:X_train, y_:Y_train, phase_train: False}\n feed_dict_cv = {x:X_cv, y_:Y_cv, phase_train: False}\n feed_dict_test = {x:X_test, y_:Y_test, phase_train: False}\nelse:\n #Don't do BN\n feed_dict_train = {x:X_train, y_:Y_train}\n feed_dict_cv = {x:X_cv, y_:Y_cv}\n feed_dict_test = {x:X_test, y_:Y_test}\n\ndef get_batch_feed(X, Y, M, phase_train):\n mini_batch_indices = np.random.randint(M,size=M)\n Xminibatch = X[mini_batch_indices,:] # ( M x D^(0) )\n Yminibatch = Y[mini_batch_indices,:] # ( M x D^(L) )\n if phase_train is not None:\n #DO BN\n feed_dict = {x: Xminibatch, y_: Yminibatch, phase_train: True}\n else:\n #Don't do BN\n feed_dict = {x: Xminibatch, y_: Yminibatch}\n return feed_dict\n\nwith tf.Session() as sess:\n sess.run( tf.initialize_all_variables() )\n for iter_step in xrange(steps):\n feed_dict_batch = get_batch_feed(X_train, Y_train, M, phase_train)\n # Collect model statistics\n if iter_step%report_error_freq == 0:\n train_error = sess.run(fetches=l2_loss, feed_dict=feed_dict_train)\n cv_error = sess.run(fetches=l2_loss, feed_dict=feed_dict_cv)\n test_error = sess.run(fetches=l2_loss, feed_dict=feed_dict_test)\n\n do_stuff_with_errors(train_error, cv_error, test_error)\n # Run Train Step\n sess.run(fetches=train_step, feed_dict=feed_dict_batch)\n","sub_path":"tf_playground/batch_normalization/batch_norm_inference.py","file_name":"batch_norm_inference.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"111502039","text":"from gensim.models.doc2vec import *\nfrom gensim.models.word2vec import Word2Vec\nimport pickle\nimport re\nimport numpy as np\nimport pandas as pd\nimport nltk.corpus as nc\nimport random\nfrom IPython import embed\ndata_path = '../data/'\nstops = set(nc.stopwords.words(\"english\"))\n\nsrc = \"kitchen\"\ntgt = \"books\"\n\nf = open(data_path + 'amazon_domain_adaptation_dictionary_data.pickle','rb')\nDics = pickle.load(f)\nf.close()\nsrc_text = Dics[src]['trainx']\nsrc_label = Dics[src]['trainy']\ntgt_text = Dics[tgt]['trainx']\ntgt_label = Dics[tgt]['trainy']\ntotal_text = src_text + tgt_text\ntotal_st_label = [1]*len(src_text) + [0]*len(tgt_text)\ntotal_true_label = 
src_label + tgt_label\nprint(len(total_true_label))\nprint(len(total_text))\ntotal_text = np.array(total_text)\nprint(total_text.shape)\ntotal_st_label = np.array(total_st_label)\ntotal_true_label = np.array(total_true_label)\naa =list(range(len(total_text)))\nrandom.shuffle(aa)\n\n\ntotal_text = total_text[aa].tolist()\ntotal_st_label =total_st_label[aa]\ntotal_true_label = total_true_label[aa]\n#print(total_text)\nf =open(data_path+'amazon_source_'+src+'_target_'+tgt+'_shuffled.pickle','wb') \npickle.dump({'total_text':total_text,'st_label':total_st_label,'true_label':total_true_label},f)\nf.close()\n\ndocuments = []\nfor document in total_text:\n temp = document.replace('!','.').replace('?','.').replace(';','.').replace(':','.').replace('\\n','.').strip()\n documents.append(temp.split('.'))\nsentences2 = []\nfor doc in documents:\n for sen in doc:\n sen = sen.lower()\n sen = re.sub(\"[^a-zA-Z]\",\" \",sen)\n sentence = sen.split()\n sentences2.append(sentence)\nsentences = []\nfor uid, doc in enumerate(documents):\n for sen in doc:\n sen = sen.lower()\n sen = re.sub(\"[^a-zA-Z]\",\" \",sen)\n if total_st_label[uid] ==1:\n if total_true_label[uid] ==1:\n sentence = TaggedDocument(words = sen.split(),tags = ['DOC_%s'%(uid),'Positive'])\n else:\n sentence = TaggedDocument(words = sen.split(),tags = ['DOC_%s'%(uid),'Negative'])\n else:\n sentence = TaggedDocument(words = sen.split(), tags = ['DOC_%s'%(uid)])\n sentences.append(sentence)\n\nprint(\"length of sentences = \",len(sentences))\nprint(sentences[0])\n\ndel documents\n\nd_size = 200\n\nprint(\"start to train the word vectors using word2vec\")\n\nmodel_word2vec = Word2Vec(sentences2,size = d_size, window=3, min_count=10, workers = 30, sg=1, iter=30)\nmodel_word2vec.save(data_path+'word2vec_source_'+src+'_target_'+tgt+'.sg')\nprint(\"start to train document vectors using fixed word vectors\")\n\nfrom copy import deepcopy\nmodel_dbow = Doc2Vec(sentences,size = d_size,dbow_words =1, window = 3, min_count = 10, workers = 30, dm=0,iter=30)\nfile_name = data_path+'doc2vec_source_'+src+'_target_'+tgt\nmodel_dbow.save(file_name+'.dbow_sent')\nposi = model_dbow.docvecs._int_index('Positive')\nnegi = model_dbow.docvecs._int_index('Negative')\ndoctag = deepcopy(model_dbow.docvecs.doctag_syn0)\ndoctag = np.delete(doctag,[posi,negi],0)\ndoc2vec = {'st_label':total_st_label,'true_label':total_true_label,'docvec':doctag}\n\nembed()\nf = open(file_name+'_dbow_sent_data.pickle','wb')\npickle.dump(doc2vec,f)\nf.close()\n\n","sub_path":"sent_original_doc2vec_training.py","file_name":"sent_original_doc2vec_training.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"235455384","text":"#-*-coding:utf-8-*-\n\n\nimport cv2\nimport numpy as np\n\n\ndef draw_batch_pose(img, keypoints):\n for i, keypoint in enumerate(keypoints):\n draw_pose(img, keypoint)\n\n\ndef draw_pose(img, keypoints):\n reprojectdst, euler_angle = get_head_pose(keypoints, img)\n pyr = euler_angle.reshape(-1)\n tdx = np.mean(keypoints[0::2])\n tdy = np.mean(keypoints[1::2])\n draw_axis(img, pyr[1], pyr[0], pyr[2],\n tdx, tdy)\n\n for start, end in line_pairs:\n cv2.line(img, reprojectdst[start], reprojectdst[end], (0, 255, 255), 2)\n cv2.putText(img, f'pitch:{pyr[0]:.2f}', (0, 20 + 0 * 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n cv2.putText(img, f'yaw:{pyr[1]:.2f}', (0, 20 + 1 * 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n cv2.putText(img, f'roll:{pyr[2]:.2f}', (0, 20 + 2 * 
20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))\n\nimport math\nfrom math import cos, sin\n\n\ndef draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size = 100):\n\n pitch = pitch * np.pi / 180\n yaw = -(yaw * np.pi / 180)\n roll = roll * np.pi / 180\n\n if tdx != None and tdy != None:\n tdx = tdx\n tdy = tdy\n else:\n height, width = img.shape[:2]\n tdx = width / 2\n tdy = height / 2\n\n # X-Axis pointing to right. drawn in red\n x1 = size * (cos(yaw) * cos(roll)) + tdx\n y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy\n\n # Y-Axis | drawn in green\n # v\n x2 = size * (-cos(yaw) * sin(roll)) + tdx\n y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy\n\n # Z-Axis (out of the screen) drawn in blue\n x3 = size * (sin(yaw)) + tdx\n y3 = size * (-cos(yaw) * sin(pitch)) + tdy\n\n cv2.arrowedLine(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)\n cv2.arrowedLine(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)\n cv2.arrowedLine(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)\n\n return img\n\ndef distance(bbox, landmark):\n if bbox is None:\n bbox = np.array([\n np.min(landmark[0::2]),\n np.min(landmark[1::2]),\n np.max(landmark[0::2]),\n np.max(landmark[1::2]),\n ])\n left = landmark[:, 0::2].min()\n top = landmark[:, 1::2].min()\n right = landmark[:, 0::2].max()\n down = landmark[:, 1::2].max()\n return np.abs(bbox[0] - left), \\\n np.abs(bbox[1] - top), \\\n np.abs(bbox[2] - right), \\\n np.abs(bbox[3] - down)\n\n\ndef draw_distance(img, bbox, landmark, anchor=(0, 100)):\n if bbox is None:\n bbox = np.array([\n np.min(landmark[0::2]),\n np.min(landmark[1::2]),\n np.max(landmark[0::2]),\n np.max(landmark[1::2]),\n ])\n distance_key = ['left', 'top', 'right', 'bottom']\n distance_value = distance(bbox, landmark)\n whwh = [bbox[2] - bbox[0], bbox[3] - bbox[1],\n bbox[2] - bbox[0], bbox[3] - bbox[1], ]\n for j, (key, value) in enumerate(zip(distance_key, distance_value)):\n if key == 'left':\n color = choice_color(value > 10)\n elif key == 'right':\n color = choice_color(value > 10)\n elif key == 'bottom':\n color = choice_color(value > 20)\n else:\n color = (255, 255, 255)\n\n cv2.putText(img, f'{key}:{value:.2f}', (anchor[0], anchor[1] + j * 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)\n\n if key == 'left':\n color = choice_color(value / whwh[j] > 0.05)\n elif key == 'right':\n color = choice_color(value / whwh[j] > 0.05)\n elif key == 'bottom':\n color = choice_color(value / whwh[j] > 0.10)\n else:\n color = (255, 255, 255)\n\n cv2.putText(img, f'{key}:{value / whwh[j]:.2f}',\n (anchor[0] + 100, anchor[1] + j * 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)\n\n\ndef get_expression(bbox, label, eye_close_thres = 0.02,\n mouth_close_thres= 0.02,\n big_mouth_open_thres=0.08,\n hin = 160, win = 160):\n if bbox is None:\n bbox = np.array([\n np.min(label[0::2]),\n np.min(label[1::2]),\n np.max(label[0::2]),\n np.max(label[1::2]),\n ])\n bbox_height = bbox[3] - bbox[1]\n bbox_width = bbox[2] - bbox[0]\n left_eye_close = np.sqrt(\n np.square(label[37, 0] - label[41, 0]) +\n np.square(label[37, 1] - label[41, 1])) / bbox_height < eye_close_thres \\\n or np.sqrt(np.square(label[38, 0] - label[40, 0]) +\n np.square(label[38, 1] - label[40, 1])) / bbox_height < eye_close_thres\n right_eye_close = np.sqrt(\n np.square(label[43, 0] - label[47, 0]) +\n np.square(label[43, 1] - label[47, 1])) / bbox_height < eye_close_thres \\\n or np.sqrt(np.square(label[44, 0] - label[46, 0]) +\n np.square(label[44, 1] - label[46, 
1])) / bbox_height < eye_close_thres\n\n ###half face\n half_face1 = np.sqrt(np.square(label[36, 0] - label[45, 0]) +\n np.square(label[36, 1] - label[45, 1])) / bbox_width < 0.5\n half_face2 = np.sqrt(np.square(label[62, 0] - label[66, 0]) +\n np.square(label[62, 1] - label[66, 1])) / bbox_height > 0.15\n # big mouth open\n big_mouth_open = np.sqrt(np.square(label[62, 0] - label[66, 0]) +\n np.square(label[62, 1] - label[66, 1])) / hin > big_mouth_open_thres\n\n return left_eye_close, right_eye_close, half_face1, half_face2, big_mouth_open\n\n\ndef choice_color(value):\n if value == True:\n color = (0, 0, 255)\n elif value == False:\n color = (255, 255, 0)\n else:\n color = (255,255,255)\n return color\n\n\ndef draw_expression(img, bbox, landmark, anchor=(0, 200)):\n expression = get_expression(bbox, landmark)\n expression_key = ['left_eye_close', 'right_eye_close', 'half_face1', 'half_face2', 'big_mouth_open']\n for j, (key, value) in enumerate(zip(expression_key, expression)):\n color = choice_color(value)\n cv2.putText(img, f'{key}:{value}', (anchor[0], anchor[1] + j * 20),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, color)\n\n# object_pts = np.float32([[6.825897, 6.760612, 4.402142],\n# [1.330353, 7.122144, 6.903745],\n# [-1.330353, 7.122144, 6.903745],\n# [-6.825897, 6.760612, 4.402142],\n# [5.311432, 5.485328, 3.987654],\n# [1.789930, 5.393625, 4.413414],\n# [-1.789930, 5.393625, 4.413414],\n# [-5.311432, 5.485328, 3.987654],\n# [2.005628, 1.409845, 6.165652],\n# [-2.005628, 1.409845, 6.165652],\n# [2.774015, -2.080775, 5.048531],\n# [-2.774015, -2.080775, 5.048531],\n# [0.000000, -3.116408, 6.097667],\n# [0.000000, -7.415691, 4.070434]])\nobject_pts = np.float32([[6.825897, 6.760612, 4.402142],\n [1.330353, 7.122144, 6.903745],\n [-1.330353, 7.122144, 6.903745],\n [-6.825897, 6.760612, 4.402142],\n [5.311432, 5.485328, 3.987654],\n [1.789930, 5.393625, 4.413414],\n [-1.789930, 5.393625, 4.413414],\n [-5.311432, 5.485328, 3.987654],\n [2.005628, 1.409845, 6.165652],\n [-2.005628, 1.409845, 6.165652]])\nreprojectsrc = np.float32([[10.0, 10.0, 10.0],\n [10.0, 10.0, -10.0],\n [10.0, -10.0, -10.0],\n [10.0, -10.0, 10.0],\n [-10.0, 10.0, 10.0],\n [-10.0, 10.0, -10.0],\n [-10.0, -10.0, -10.0],\n [-10.0, -10.0, 10.0]])\n\nline_pairs = [[0, 1], [1, 2], [2, 3], [3, 0],\n [4, 5], [5, 6], [6, 7], [7, 4],\n [0, 4], [1, 5], [2, 6], [3, 7]]\n\n\ndef get_head_pose(shape,img):\n h,w,_=img.shape\n K = [w, 0.0, w//2,\n 0.0, w, h//2,\n 0.0, 0.0, 1.0]\n # Assuming no lens distortion\n D = [0, 0, 0.0, 0.0, 0]\n\n cam_matrix = np.array(K).reshape(3, 3).astype(np.float32)\n dist_coeffs = np.array(D).reshape(5, 1).astype(np.float32)\n\n\n\n # image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],\n # shape[39], shape[42], shape[45], shape[31], shape[35],\n # shape[48], shape[54], shape[57], shape[8]])\n image_pts = np.float32([shape[17], shape[21], shape[22], shape[26], shape[36],\n shape[39], shape[42], shape[45], shape[31], shape[35]])\n _, rotation_vec, translation_vec = cv2.solvePnP(object_pts, image_pts, cam_matrix, dist_coeffs)\n\n reprojectdst, _ = cv2.projectPoints(reprojectsrc, rotation_vec, translation_vec, cam_matrix,\n dist_coeffs)\n\n reprojectdst = tuple(map(tuple, reprojectdst.reshape(8, 2)))\n\n # calc euler angle\n rotation_mat, _ = cv2.Rodrigues(rotation_vec)\n pose_mat = cv2.hconcat((rotation_mat, translation_vec))\n _, _, _, _, _, _, euler_angle = cv2.decomposeProjectionMatrix(pose_mat)\n\n return reprojectdst, 
euler_angle\n","sub_path":"face_alignment/headpose.py","file_name":"headpose.py","file_ext":"py","file_size_in_byte":9527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"92247387","text":"\n# Export functions\n\n\nimport reports\n\n# Input for queries\nprint('What properties has a game?')\ntitle = input('Type a game title: ')\n\n# Export file\nfile = 'game_stat.txt'\n\n# Export the results\nwith open('answers_part2.txt', 'w') as f:\n\n f.write(reports.get_most_played(file) + '\\n')\n\n f.write(str(reports.sum_sold(file)) + '\\n')\n\n f.write(\"%.3f million\" % (reports.get_selling_avg(file)) + '\\n')\n\n f.write(str(reports.count_longest_title(file)) + '\\n')\n\n f.write(str(reports.get_date_avg(file)) + '\\n')\n\n f.write(str(reports.get_game(file, title)) + ' - Title: ' + title + '\\n')\n\n f.write(str(reports.count_grouped_by_genre(file)) + '\\n')\n\n f.write(str(reports.get_date_ordered(file)))\n print('Exporting DONE!')\n","sub_path":"part2/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"343226047","text":"\"\"\"Stupid tests that ensure logging works as expected\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport sys\nimport logging as log\nfrom StringIO import StringIO\n\nimport beets.logging as blog\nfrom beets import plugins, ui\nimport beetsplug\nfrom test._common import unittest, TestCase\nfrom test import helper\n\n\nclass LoggingTest(TestCase):\n def test_logging_management(self):\n l1 = log.getLogger(\"foo123\")\n l2 = blog.getLogger(\"foo123\")\n self.assertEqual(l1, l2)\n self.assertEqual(l1.__class__, log.Logger)\n\n l3 = blog.getLogger(\"bar123\")\n l4 = log.getLogger(\"bar123\")\n self.assertEqual(l3, l4)\n self.assertEqual(l3.__class__, blog.StrFormatLogger)\n\n l5 = l3.getChild(\"shalala\")\n self.assertEqual(l5.__class__, blog.StrFormatLogger)\n\n def test_str_format_logging(self):\n l = blog.getLogger(\"baz123\")\n stream = StringIO()\n handler = log.StreamHandler(stream)\n\n l.addHandler(handler)\n l.propagate = False\n\n l.warning(\"foo {0} {bar}\", \"oof\", bar=\"baz\")\n handler.flush()\n self.assertTrue(stream.getvalue(), \"foo oof baz\")\n\n\nclass LoggingLevelTest(unittest.TestCase, helper.TestHelper):\n class DummyModule(object):\n class DummyPlugin(plugins.BeetsPlugin):\n def __init__(self):\n plugins.BeetsPlugin.__init__(self, 'dummy')\n self.import_stages = [self.import_stage]\n self.register_listener('dummy_event', self.listener)\n\n def log_all(self, name):\n self._log.debug('debug ' + name)\n self._log.info('info ' + name)\n self._log.warning('warning ' + name)\n\n def commands(self):\n cmd = ui.Subcommand('dummy')\n cmd.func = lambda _, __, ___: self.log_all('cmd')\n return (cmd,)\n\n def import_stage(self, session, task):\n self.log_all('import_stage')\n\n def listener(self):\n self.log_all('listener')\n\n def setUp(self):\n sys.modules['beetsplug.dummy'] = self.DummyModule\n beetsplug.dummy = self.DummyModule\n self.setup_beets()\n self.load_plugins('dummy')\n\n def tearDown(self):\n self.unload_plugins()\n self.teardown_beets()\n del beetsplug.dummy\n sys.modules.pop('beetsplug.dummy')\n\n def test_command_logging(self):\n self.config['verbose'] = 0\n with helper.capture_log() as logs:\n self.run_command('dummy')\n self.assertIn('dummy: warning cmd', logs)\n self.assertIn('dummy: info cmd', logs)\n 
self.assertNotIn('dummy: debug cmd', logs)\n\n for level in (1, 2):\n self.config['verbose'] = level\n with helper.capture_log() as logs:\n self.run_command('dummy')\n self.assertIn('dummy: warning cmd', logs)\n self.assertIn('dummy: info cmd', logs)\n self.assertIn('dummy: debug cmd', logs)\n\n def test_listener_logging(self):\n self.config['verbose'] = 0\n with helper.capture_log() as logs:\n plugins.send('dummy_event')\n self.assertIn('dummy: warning listener', logs)\n self.assertNotIn('dummy: info listener', logs)\n self.assertNotIn('dummy: debug listener', logs)\n\n self.config['verbose'] = 1\n with helper.capture_log() as logs:\n plugins.send('dummy_event')\n self.assertIn('dummy: warning listener', logs)\n self.assertIn('dummy: info listener', logs)\n self.assertNotIn('dummy: debug listener', logs)\n\n self.config['verbose'] = 2\n with helper.capture_log() as logs:\n plugins.send('dummy_event')\n self.assertIn('dummy: warning listener', logs)\n self.assertIn('dummy: info listener', logs)\n self.assertIn('dummy: debug listener', logs)\n\n def test_import_stage_logging(self):\n self.config['verbose'] = 0\n with helper.capture_log() as logs:\n importer = self.create_importer()\n importer.run()\n self.assertIn('dummy: warning import_stage', logs)\n self.assertNotIn('dummy: info import_stage', logs)\n self.assertNotIn('dummy: debug import_stage', logs)\n\n self.config['verbose'] = 1\n with helper.capture_log() as logs:\n importer = self.create_importer()\n importer.run()\n self.assertIn('dummy: warning import_stage', logs)\n self.assertIn('dummy: info import_stage', logs)\n self.assertNotIn('dummy: debug import_stage', logs)\n\n self.config['verbose'] = 2\n with helper.capture_log() as logs:\n importer = self.create_importer()\n importer.run()\n self.assertIn('dummy: warning import_stage', logs)\n self.assertIn('dummy: info import_stage', logs)\n self.assertIn('dummy: debug import_stage', logs)\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromName(__name__)\n\n\nif __name__ == b'__main__':\n unittest.main(defaultTest='suite')\n","sub_path":"test/test_logging.py","file_name":"test_logging.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"383676781","text":"#libraries\r\n\r\nimport pandas as pd #data Analysis\r\nimport numpy as np #scientific compution\r\nimport seaborn as sns #statistical plotting\r\nimport matplotlib.pyplot as plt #plot\r\n# % matplotlib inline\r\nimport math #BASE MATHEMATICS\r\n \r\n# IMPORT DATA\r\nnon=[\" ?\", \"?\"]\r\nadult_data = pd.read_csv(\"adult.csv\" , names=['age','workclass','fnlwgt', 'education','education-num','marital-status','occupation','relationship','race','sex','capital-gain','capital-loss','hours-per-week','native-country','class'])\r\nadult_data.head(10)\r\nadult_data=adult_data.replace(non, np.nan) #replace '?' 
values with NaN\r\n\r\n# Analyzing Data\r\n# For categorized Data\r\nsns.countplot(x=\"class\" , data = adult_data)\r\n# in comparison with Class\r\nsns.countplot(x=\"class\" , hue=\"sex\", data = adult_data)\r\nsns.countplot(x=\"class\" , hue=\"race\", data=adult_data)\r\nsns.countplot(x=\"class\" , hue=\"relationship\", data=adult_data)\r\nsns.countplot(x=\"class\" , hue=\"workclass\", data=adult_data)\r\nsns.countplot(x=\"class\" , hue=\"education\", data=adult_data)\r\nsns.countplot(x=\"class\" , hue=\"occupation\", data=adult_data)\r\nsns.countplot(x=\"class\" , hue=\"marital-status\", data=adult_data)\r\nsns.countplot(x=\"class\" , hue=\"native-country\", data=adult_data)\r\n# alone\r\nsns.countplot(x=\"sex\", data = adult_data)\r\nsns.countplot(x=\"race\", data=adult_data)\r\nsns.countplot(x=\"relationship\", data=adult_data)\r\nsns.countplot(x=\"workclass\", data=adult_data)\r\nsns.countplot(x=\"education\", data=adult_data)\r\nsns.countplot(x=\"occupation\", data=adult_data)\r\nsns.countplot(x=\"marital-status\", data=adult_data)\r\nsns.countplot(x=\"native-country\", data=adult_data) #~99% are from US\r\nadult_data=adult_data.drop(\"native-country\" , axis=1) #deleting this one-sided (almost constant) feature\r\n# For Continuous Data\r\nadult_data[\"age\"].plot.hist()\r\nadult_data[\"fnlwgt\"].plot.hist(bins=20, figsize=(10,5))\r\nadult_data.info()\r\nadult_data[\"education-num\"].plot.hist()\r\nadult_data[\"capital-gain\"].plot.hist()\r\nadult_data[\"capital-loss\"].plot.hist()\r\nadult_data[\"hours-per-week\"].plot.hist()\r\nadult_data.isin([0]).sum()[10:12] #showing the number of 0s in the capital-gain/loss columns\r\n(adult_data.sum()[10:12]-adult_data.isin([0]).sum()[10:12])/(adult_data.sum()[10:12]) #as we see, 99.91% of the values in these columns are 0\r\nadult_data=adult_data.drop(\"capital-gain\", axis=1)\r\nadult_data=adult_data.drop(\"capital-loss\" , axis=1)\r\n\r\n# Cleaning Data\r\nadult_data.isnull() #showing False/True values, True means it is NaN\r\nadult_data.isnull().sum() #showing the number of True values in each column\r\nsns.heatmap(adult_data.isnull(), yticklabels=False, cmap=\"viridis\") #visualizing NaN values\r\nadult_data.dropna(inplace=True) #remove instances with missing or malformed features\r\n\r\n# categorical one-hot encoding\r\nsex=pd.get_dummies(adult_data['sex'], drop_first=True)\r\nraces=pd.get_dummies(adult_data['race'], drop_first=True)\r\nrelation=pd.get_dummies(adult_data['relationship'], drop_first=True)\r\nworkclass=pd.get_dummies(adult_data['workclass'], drop_first=True)\r\neducation=pd.get_dummies(adult_data['education'], drop_first=True)\r\noccupation=pd.get_dummies(adult_data['occupation'], drop_first=True)\r\nmarital=pd.get_dummies(adult_data['marital-status'], drop_first=True)\r\nincome=pd.get_dummies(adult_data['class'], drop_first=True)\r\nadult_data=pd.concat([adult_data,sex,races,relation,workclass,education,occupation,marital,income], axis=1 )\r\nadult_data=adult_data.drop(['sex','race','relationship','workclass','education','occupation','marital-status','class'], axis=1)\r\nadult_data.head(5)\r\n\r\n# Feature_scaling\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nscaler = MinMaxScaler()\r\nadult_data[['age', 'fnlwgt','education-num','hours-per-week']] = scaler.fit_transform(adult_data[['age', 'fnlwgt','education-num','hours-per-week']])\r\n\r\n# train data\r\nX=adult_data.drop(' >50K', axis=1)\r\ny=adult_data[' >50K']\r\n
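# --- Editor's sketch (not in the original script): X and y are staged above but
# never fit; a minimal, assumed continuation using scikit-learn's train/test
# split and a simple baseline classifier.
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
clf = LogisticRegression(max_iter=1000)  # max_iter raised so the solver converges on one-hot data
clf.fit(X_train, y_train)
print('test accuracy:', clf.score(X_test, y_test))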
","sub_path":"dataset2.py","file_name":"dataset2.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"380872704","text":"# script.py: populate Salesforce with data exported from Clover\n# Write customers who joined since the last time this script was run (or over the last\n# DEFAULT_NUM_DAYS_AGO days). Log output to LOG_FILE and make a copy of the csv in\n# \"CSV_HISTORY_DIR//customers.csv\".\n#\n# Required environment variables\n# SF_USERNAME : Salesforce username\n# SF_PASSWORD : Salesforce password\n# SF_TOKEN : security token\n\nimport argparse\nimport os\nimport sys\nimport re\nimport datetime as dt\nimport pandas as pd\nfrom datetime import datetime\nfrom shutil import copyfile\nfrom simple_salesforce import Salesforce\nfrom dotenv import load_dotenv\n\n# Define file names and defaults\nORDERS_CSV_FILE = 'orders.csv' # Orders csv file exported from Clover\nPAYMENTS_CSV_FILE = 'payments.csv' # Payments csv file exported from Clover\nCUSTOMERS_CSV_FILE = 'customers.csv' # Customers csv file exported from Clover\nCSV_HISTORY_DIR = 'csv_history' # Directory to which csvs are copied\nLOG_FILE = 'log.txt' # File to which this script appends logs\nDATE_FORMAT = '%m-%d-%Y' # Date format used by the script\nCSV_DATETIME_FORMAT = '%m-%d-%Y %I:%M%p' # Datetime format for each subdirectory under\n # CSV_HISTORY_DIR\nDATETIME_FORMAT = '%c' # Datetime format stored in LOG_FILE\n\n# Function to print to console and write to LOG_FILE\ndef log(message):\n print(message, end='')\n with open(LOG_FILE, 'a') as f:\n f.write(message)\n\n# Record script start time\nstart_time = dt.datetime.now()\n\n# Parse commandline arguments\nparser = argparse.ArgumentParser(description='Write data from Clover CSV files to SF.')\nparser.add_argument('start_date', help='start of date range (mm-dd-yyyy)')\nparser.add_argument('end_date', nargs='?', default=start_time.strftime(DATE_FORMAT),\n help='end of date range (mm-dd-yyyy)')\nparser.add_argument('-t', dest='test', action='store_true',\n help='flag for running on a test SF instance (must have a valid .env.test file)')\n\nargs = parser.parse_args()\n\n# Validate arguments\nif re.match(r'\\d{2}-\\d{2}-\\d{4}$', args.start_date) is None or \\\n re.match(r'\\d{2}-\\d{2}-\\d{4}$', args.end_date) is None:\n print('[ERROR] Dates must be of the form mm-dd-yyyy')\n sys.exit()\n\nshould_proceed = input(\"Running for dates {} through {} in {} (y/n): \"\n .format(args.start_date, args.end_date, 'test' if args.test else 'prod'))\nif should_proceed[0] != 'y':\n sys.exit()\n\n# Load environment variables\nif args.test:\n load_dotenv('.env.test')\nelse:\n load_dotenv()\n\n# Initialize SF connection\nconnection_args = {\n 'username' : os.environ['SF_USERNAME'],\n 'password' : os.environ['SF_PASSWORD'],\n 'security_token' : os.environ['SF_TOKEN']\n}\n\nif args.test:\n connection_args['domain'] = 'test'\n\nsf = Salesforce(**connection_args)\n\n# Lookup record type \"Item Shipment\"\ndata = sf.query_all(\"SELECT Id FROM RecordType WHERE Name = 'Item Shipment' LIMIT 1\")\nrecordtype_id = None\nif data['totalSize'] == 0:\n log('No RecordType found\\n')\n sys.exit()\nelse:\n recordtype_id = data['records'][0]['Id']\n\n# Lookup organization \"Curbside Sales (Outgoing)\"\ndata = sf.query_all(\"SELECT Id FROM Account WHERE Name = 'Curbside Sales (Outgoing)' LIMIT 1\")\norg_id = None\nif data['totalSize'] == 0:\n log('No organization found\\n')\n sys.exit()\nelse:\n org_id = 
data['records'][0]['Id']\n\n# Load csvs\ntry:\n orders = pd.read_csv(ORDERS_CSV_FILE)\n payments = pd.read_csv(PAYMENTS_CSV_FILE)\n customers = pd.read_csv(CUSTOMERS_CSV_FILE)\n print('CSV files loaded!')\nexcept FileNotFoundError:\n print('[ERROR] CSV file not found!')\n sys.exit()\n\n# Drop irrelevant columns\norders.drop(['Invoice Number', 'Order Number', 'Order Employee ID', 'Order Employee Name',\n 'Order Employee Custom ID', 'Currency', 'Tax Amount', 'Tip', 'Service Charge',\n 'Discount', 'Refunds Total', 'Manual Refunds Total', 'Credit Card Auth Code',\n 'Credit Card Transaction ID', 'Tender', 'Order Date', 'Order Total', 'Payments Total',\n 'Payment Note'], axis=1, inplace=True)\n\npayments.drop(['Payment ID', 'Transaction #', 'Note', 'Tender', 'Result', 'Order Date',\n 'External Payment ID', 'Invoice Number', 'Card Auth Code', 'Card Brand',\n 'Card Number', 'Card Entry Type', 'Currency', 'Tax Amount', 'Tip Amount',\n 'Service Charge Amount', 'Payment Employee ID', 'Payment Employee Name',\n 'Payment Employee Custom ID', 'Order Employee ID', 'Order Employee Name',\n 'Order Employee Custom ID', 'Device', '# Refunds', 'Refund Amount'], axis=1,\n inplace=True)\n\ncustomers.drop(['Customer ID', 'Address Line 1', 'Address Line 2', 'Address Line 3', 'City',\n 'State / Province', 'Postal / Zip Code', 'Country', 'Marketing Allowed',\n 'Additional Addresses'], axis=1, inplace=True)\n\n# Connect Order and Payment data together\ntransactions = payments.merge(orders, on='Order ID', how='inner')\n\n# Stage transaction data for SF\ntransactions.fillna('', inplace=True)\n\nif org_id is not None:\n transactions.insert(1, \"AccountId\", [org_id]*len(transactions.index), True)\n transactions.insert(2, \"Site_Served__c\", [org_id]*len(transactions.index), True)\n\nif recordtype_id is not None:\n transactions.insert(3, 'RecordTypeId', [recordtype_id]*len(transactions.index), True)\n\n# Create Donation and Shipment record names\nshipments = transactions.to_dict('records')\ndonation_shipment_names = []\n\nfor record in shipments:\n name = 'Missing Info' if record['Customer Name'] == '' else record['Customer Name'] \n amt = '$' + str(record['Amount']) + '0'\n datestring = record['Payment Date'][3:6] + ' ' + record['Payment Date'][:2] + \", \" + \\\n record['Payment Date'][7:11]\n date_time_obj = datetime.strptime(datestring, '%b %d, %Y')\n date = dt.datetime.strftime(date_time_obj, '%m/%d/%Y')\n crids = re.sub(\"[^0-9, ]\", \"\", record['Note']).strip()\n if crids == '':\n crids = 'Not Found'\n donation_shipment_names.append(name + ' - Shipment CRID(s): ' + crids + ' '+ date + ' = ' + amt)\n\ntransactions.insert(4, 'Name', donation_shipment_names, True)\ntransactions.rename(columns= {'Order Payment State':'StageName', 'Payment Date': 'CloseDate'},\n inplace=True)\ntransactions.drop(['Customer Name', 'Order ID', 'Note'], axis=1, inplace=True)\n\n# Filter out customers by join date\ncustomers_start_date = dt.datetime.strptime(args.start_date, DATE_FORMAT).date()\ncustomers_end_date = dt.datetime.strptime(args.end_date, DATE_FORMAT).date()\n\ncustomers = customers[pd.to_datetime(customers['Customer Since']).dt.date >= customers_start_date]\ncustomers = customers[pd.to_datetime(customers['Customer Since']).dt.date <= customers_end_date]\n\n# Clean customers data\ncustomers.drop('Customer Since', axis=1, inplace=True)\ncustomers.columns = ['FirstName', 'LastName', 'Phone', 'Email']\ncustomers.dropna(subset=['Email'], inplace=True)\ncustomers.fillna('', inplace=True)\ncustomers.insert(1, 'AccountId', 
[org_id]*len(customers.index), True)\n\n# Write records to SF\nlog(\"[{}] {} ({} - {})\\n\".format(\n 'TEST' if args.test else 'PROD',\n start_time.strftime(DATETIME_FORMAT),\n customers_start_date.strftime(DATE_FORMAT),\n customers_end_date.strftime(DATE_FORMAT)))\n\nlog('Transactions:\\n')\ntransaction_data = transactions.to_dict('records')\ntransactions_skipped = 0\nfor transaction in transaction_data:\n try:\n transaction['CloseDate'] = dt.datetime.strptime(transaction['CloseDate'],\n '%d-%b-%Y %I:%M %p %Z').isoformat()\n sf.Opportunity.create(transaction)\n log(\"\\tInserted transaction '{}'\\n\".format(transaction['Name']))\n except Exception as e:\n log(\"\\tCould not insert transaction '{}'\\n\\tError: {}\\n\"\n .format(transaction['Name'], str(e)))\n transactions_skipped += 1\n\nlog('Customers:\\n')\ncustomer_data = customers.to_dict('records')\ncustomers_skipped = 0\nfor customer in customer_data:\n try:\n sf.Contact.create(customer)\n log(\"\\tInserted customer {} {}\\n\".format(customer['FirstName'], customer['LastName']))\n except Exception as e:\n log(\"\\tCould not insert customer {} {}\\n\\tError: {}\\n\"\n .format(customer['FirstName'], customer['LastName'], str(e)))\n customers_skipped += 1\n\n# Make copies of csvs\ndest_csv_path = \"{}/{}/\".format(CSV_HISTORY_DIR, start_time.strftime(CSV_DATETIME_FORMAT))\nos.makedirs(os.path.dirname(dest_csv_path + 'input/'), exist_ok=True)\ncopyfile(ORDERS_CSV_FILE, dest_csv_path + 'input/orders.csv')\ncopyfile(PAYMENTS_CSV_FILE, dest_csv_path + 'input/payments.csv')\ncopyfile(CUSTOMERS_CSV_FILE, dest_csv_path + 'input/customers.csv')\n\nos.makedirs(os.path.dirname(dest_csv_path + 'actual/'), exist_ok=True)\ncustomers.to_csv(dest_csv_path + 'actual/customers.csv')\ntransactions.to_csv(dest_csv_path + 'actual/transactions.csv')\n\n# Delete input csvs\ntry:\n os.remove(ORDERS_CSV_FILE)\n os.remove(PAYMENTS_CSV_FILE)\n os.remove(CUSTOMERS_CSV_FILE)\nexcept OSError:\n pass\n\n# Log results of run\ncustomers_written = len(customer_data) - customers_skipped\nlog(\"{} customer records written, {} skipped\\n\".format(customers_written, customers_skipped))\n\ntransactions_written = len(transaction_data) - transactions_skipped\nlog(\"{} transaction records written, {} skipped\\n\\n\"\n .format(transactions_written, transactions_skipped))\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":9568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642146041","text":"import copy\n\nfrom typing import Dict, Any\nfrom queue import Queue\nfrom threading import Thread\nfrom typing import NoReturn, List, Dict\n\nfrom cv2 import aruco\n\nfrom .detector_utils import load_inference_graph, detect_objects,\\\n get_center_points, draw_box_on_image\nfrom .calibration import Calibration\nfrom .synchronized_variable import SynchronizedVariable\n\n# 117 was found out by testing with static test-images. 
The real number of the\n# markers created by the Pupil team is not known/does not work\n# (see https://github.com/pupil-labs/pupil-helpers/tree/master/markers_stickersheet).\n_aruco_dict = aruco.Dictionary_create(117, 3)\n_aruco_parameters = aruco.DetectorParameters_create()\n\n\nclass Worker:\n def __init__(self, input_q: Queue, output_q: Queue, marker_q: Queue,\n center_points_q: Queue, cap_params: Dict[str, Any],\n latest_markers: SynchronizedVariable[List[Dict]],\n calibration: Calibration = None):\n self.input_q = input_q\n self.output_q = output_q\n self.marker_q = marker_q\n self.center_points_q = center_points_q\n self.cap_params = cap_params\n self.calibration = calibration\n self.detection_graph, self.sess = load_inference_graph()\n self.latest_markers = latest_markers\n\n def _detect_hands(self, frame, o_frame: SynchronizedVariable):\n # Actual detection. Variable boxes contains the bounding box\n # coordinates for hands detected, while scores contains the confidence\n # for each of these boxes.\n # Hint: If len(boxes) > 1 , you may assume you have found at least one\n # hand (within your score threshold)\n\n boxes, scores = detect_objects(frame, self.detection_graph, self.sess)\n\n center_points = get_center_points(self.cap_params[\"num_hands_detect\"],\n self.cap_params[\"score_thresh\"],\n scores, boxes,\n self.cap_params[\"im_width\"],\n self.cap_params[\"im_height\"])\n\n self.center_points_q.put(center_points)\n\n with o_frame.lock:\n draw_box_on_image(self.cap_params['num_hands_detect'],\n self.cap_params[\"score_thresh\"],\n scores, boxes, self.cap_params['im_width'],\n self.cap_params['im_height'],\n o_frame.value)\n\n def _detect_markers(self, frame, o_frame: SynchronizedVariable):\n corners, ids, _ = aruco.detectMarkers(frame, _aruco_dict,\n parameters=_aruco_parameters)\n\n if ids is None:\n return\n\n #print(\"Previous latest markers: {}\".format(self.latest_markers))\n markers = []\n for i in range(len(corners)):\n markers.append({\n 'id': int(ids[i][0]),\n 'corners': corners[i][0].astype(int).tolist(),\n })\n\n self.latest_markers.value = markers\n\n self.marker_q.put(markers)\n\n with o_frame.lock:\n aruco.drawDetectedMarkers(o_frame.value, corners, ids)\n\n if self.calibration is None:\n return\n\n rotation_vecs, translation_vecs, _ = aruco.estimatePoseSingleMarkers(\n corners, self.calibration.ml, self.calibration.camera_matrix,\n self.calibration.dist_coeffs)\n\n with o_frame.lock:\n for i in range(len(ids)):\n aruco.drawAxis(o_frame.value, self.calibration.camera_matrix,\n self.calibration.dist_coeffs, rotation_vecs[i],\n translation_vecs[i], 0.01)\n\n def run(self) -> NoReturn:\n while True:\n frame = self.input_q.get()\n\n if frame is None:\n self.output_q.put(frame)\n continue\n\n # Create copy of frame to draw boxes on (we don't want to draw\n # that on the input frame, because either of the detection\n # algorithms could be disturbed by this).\n o_frame = SynchronizedVariable(copy.deepcopy(frame))\n\n threads = []\n for method in [self._detect_hands, self._detect_markers]:\n thr = Thread(target=method, args=(frame, o_frame))\n thr.start()\n threads.append(thr)\n\n for thread in threads:\n thread.join()\n\n self.output_q.put(o_frame.value)\n\n # TODO Get translation matrices and draw AOI on image, BUT HOW DO\n # GET AOI HERE?\n","sub_path":"utils/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
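# --- Editor's sketch (illustrative, not a record from the dataset): minimal
# wiring for the utils/worker.py module above. The package-style imports, queue
# sizes and cap_params values are assumptions; SynchronizedVariable takes an
# initial value, as in Worker.run() above, and cap_params uses the keys read in
# Worker._detect_hands().
from queue import Queue
from threading import Thread

from utils.worker import Worker
from utils.synchronized_variable import SynchronizedVariable

input_q, output_q = Queue(maxsize=5), Queue(maxsize=5)
marker_q, center_points_q = Queue(), Queue()
cap_params = {'num_hands_detect': 2, 'score_thresh': 0.2,
              'im_width': 640, 'im_height': 480}
latest_markers = SynchronizedVariable([])  # shared, lock-guarded marker list

worker = Worker(input_q, output_q, marker_q, center_points_q,
                cap_params, latest_markers)
Thread(target=worker.run, daemon=True).start()  # run() loops on input_q forever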
+{"seq_id":"485155139","text":"#!/usr/bin/python\n#------------------------------------------------\n# Script name: hellotemplate.py\n#\n# Description: \n# This script is a generic command line Python app template.\n# The example shows how to nicely handle parameter requirements \n# and parsing parameters. \n#\n# Pip packages needed:\n# arparse built-in.\n#\n# IBM i header if used there\n# !/QOpenSys/pkgs/bin/python3\n#\n# Parameters\n# --parm1=Parm 1\n# --parm2=Parm 2\n# --parm3=Parm 3\n# --help or -h lists all parms on command line.\n#------------------------------------------------\n# Useful Python links (any links that are educational for this script)\n# http://zetcode.com/python/argparse/\n# https://stackoverflow.com/questions/5943249/python-argparse-and-controlling-overriding-the-exit-status-code\n# https://www.techbeamers.com/use-try-except-python/\n# argument parse exceptions\n# https://stackoverflow.com/questions/8107713/using-argparse-argumenterror-in-python\n#------------------------------------------------\n# Imports\n#------------------------------------------------\nimport argparse\nimport sys\nfrom sys import platform\nimport os\nimport re\nimport time\nimport traceback \n\n#------------------------------------------------\n# Script initialization\n#------------------------------------------------\n\n# Initialize or set variables\nappdesc=\"This is the app desc\"\nexitcode=0 #Init exitcode\nexitmessage='' #Init the exit message\nparmsexpected=3; #How many parms do we need ?\n\n \ndef str2bool(strval):\n #-------------------------------------------------------\n # Function: str2bool\n # Desc: Constructor\n # :strval: String value for true or false\n # :return: Return True if string value is\" yes, true, t or 1\n #-------------------------------------------------------\n return strval.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\ndef trim(strval):\n #-------------------------------------------------------\n # Function: trim\n # Desc: Alternate name for strip\n # :strval: String value to trim. \n # :return: Trimmed value\n #-------------------------------------------------------\n return strval.strip()\n\ndef rtrim(strval):\n #-------------------------------------------------------\n # Function: rtrim\n # Desc: Alternate name for rstrip\n # :strval: String value to trim. \n # :return: Trimmed value\n #-------------------------------------------------------\n return strval.rstrip()\n\ndef ltrim(strval):\n #-------------------------------------------------------\n # Function: ltrim\n # Desc: Alternate name for lstrip\n # :strval: String value to ltrim. \n # :return: Trimmed value\n #-------------------------------------------------------\n return strval.lstrip()\n\n#Output messages to STDOUT for logging\nprint(\"-------------------------------------------------------------------------------\")\nprint(appdesc)\nprint(\"Start of Main Processing - \" + time.strftime(\"%H:%M:%S\"))\nprint(\"OS:\" + platform)\n\n#------------------------------------------------\n# Main script logic\n#------------------------------------------------\ntry: # Try to perform main logic\n \n # Check to see if all required parms were passed\n # Note: Old way to check if all parms passed\n #if len(sys.argv) < parmsexpected + 1:\n # raise Exception(str(parmsexpected) + ' required parms - [Parm 1] [Parm 2] [Parm 3]. Process cancelled.')\n \n # Set up the command line argument parsing\n # If the parse_args function fails, the program will\n # exit with an error 2. 
In Python 3.9, there is \n # an argument to prevent an auto-exit\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', '--output', action='store_true', \n help=\"shows output\")\n parser.add_argument('--parm1', required=True,help=\"This is parm 1\")\n parser.add_argument('--parm2', required=True,help=\"This is parm 2\")\n parser.add_argument('--parm3', default=\"True\",required=False,help=\"This is optional parm 3. Default value=True\")\n # Parse the command line arguments \n args = parser.parse_args()\n \n # Set parameter work variables from command line args\n parmscriptname = sys.argv[0] #Script name\n parm1 = args.parm1 #Parameter 1\n parm2 = args.parm2 #Parameter 2\n parm3 = str2bool(args.parm3) #Parameter 3 - boolean\n \n # Output parameter variables to log file\n print(\"Parameters:\")\n print(\"Parm 1: \" + parm1)\n print(\"Parm 2: \" + str(parm2))\n print(\"Parm 3: \" + str(parm3))\n\n # Do some work now \n print(\"Hello World.\")\n \n # Set success info\n exitcode=0\n exitmessage=appdesc +\" completed normally.\"\n\n#------------------------------------------------\n# Handle Exceptions\n#------------------------------------------------\n# System Exit occurred. Most likely from argument parser\nexcept SystemExit as ex:\n print(\"Command line argument error.\")\n exitcode=ex.code # set return code for stdout\n exitmessage=str(ex) # set exit message for stdout\n #print('Traceback Info') # output traceback info for stdout\n #traceback.print_exc()\n sys.exit(99) \n\nexcept argparse.ArgumentError as exc:\n exitcode=99 # set return code for stdout\n exitmessage=str(exc) # set exit message for stdout\n #print('Traceback Info') # output traceback info for stdout\n #traceback.print_exc() \n sys.exit(99)\n\nexcept Exception as ex: # Catch and handle exceptions\n exitcode=99 # set return code for stdout\n exitmessage=str(ex) # set exit message for stdout\n print('Traceback Info') # output traceback info for stdout\n traceback.print_exc() \n sys.exit(99)\n\n#------------------------------------------------\n# Always perform final processing\n#------------------------------------------------\nfinally: # Final processing\n # Do any final code and exit now\n # We log as much relevent info to STDOUT as needed\n print('ExitCode:' + str(exitcode))\n print('ExitMessage:' + exitmessage)\n print(\"End of Main Processing - \" + time.strftime(\"%H:%M:%S\"))\n print(\"-------------------------------------------------------------------------------\")\n \n # Exit the script now\n sys.exit(exitcode) \n","sub_path":"general/hellotemplate.py","file_name":"hellotemplate.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"362719439","text":"\"\"\"\n374. 
Spiral Matrix\nhttps://www.lintcode.com/problem/spiral-matrix/description?_from=ladder&&fromId=131\nDFS\n\"\"\"\nDIRECTIONS = [\n (0, 1),\n (1, 0),\n (0, -1),\n (-1, 0)\n]\nfrom collections import deque\nclass Solution:\n \"\"\"\n @param matrix: a matrix of m x n elements\n @return: an integer list\n \"\"\"\n def spiralOrder(self, matrix):\n # write your code here\n if not matrix or not matrix[0]:\n return []\n res = []\n self.dfs(matrix, 0, 0, 0, set([(0, 0)]), res)\n return res\n \n def dfs(self, matrix, x, y, d, v, res):\n res.append(matrix[x][y])\n \n for i in range(4):\n delta_x, delta_y = DIRECTIONS[(d + i) % 4]\n nx, ny = x + delta_x, y + delta_y\n if not self.is_valid(nx, ny, matrix, v):\n continue\n v.add((nx, ny))\n self.dfs(matrix, nx, ny, (d + i) % 4, v, res)\n v.pop()\n break\n \n def is_valid(self, x, y, matrix, visited):\n n = len(matrix)\n m = len(matrix[0])\n \n if not (0 <= x < n and 0 <= y < m):\n return False\n if (x, y) in visited:\n return False\n return True","sub_path":"lintcode/374.1.py","file_name":"374.1.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"425801653","text":"from typing import Tuple\n\nfrom amino.lazy import lazy\n\nfrom kallikrein.matchers import contain, equal\nfrom kallikrein import k, unsafe_k\nfrom kallikrein.expectation import Expectation, AlgExpectation\nfrom kallikrein.matchers.either import be_right\nfrom amino import _, List\nfrom amino.list import Lists\n\nfrom tubbs.tatsu.scala import Parser\nfrom tubbs.tatsu.ast import AstMap, RoseAstTree, ast_rose_tree, AstList, AstElem\nfrom tubbs.formatter.scala.breaker import Breaker\nfrom tubbs.formatter.scala.indenter import Indenter\n\nfrom unit._support.ast import be_token\n\nfun = '''def fun[A, B, C](p1: Type1, p2: Type2)\\\n(implicit p3: A :: B, p4: Type4) = {\\\nval a = p1 match {\ncase x: Type2 => 5 case _ => 3 } }'''\n\n\nbroken_fun = '''\\\ndef fun[A, B, C]\n(p1: Type1, p2: Type2)\n(implicit p3: A :: B, p4: Type4) = {\nval a = p1 match {\ncase x: Type2 => 5\ncase _ => 3\n}\n}'''\n\n\nformatted_fun = '''def fun[A, B, C]\n(p1: Type1, p2: Type2)\n(implicit p3: A :: B, p4: Type4) = {\n val a = p1 match {\n case x: Type2 => 5\n case _ => 3\n }\n}'''\n\n\nlookbehind = 'def fun = { val b = a }'\n\nlookbehind_target = List(\n 'def fun = {',\n 'val b = a',\n '}'\n)\n\n\nclass ScalaFormatSpec:\n ''' formatting an AST\n check ast element ranges $range\n break lines of a function $break_fun\n tree bols $bols\n tree eols $eols\n range of bols and eols $bols_eols\n tree lines $tree_lines\n tree boundary nodes $boundary_nodes\n indent broken function lines $indent_broken\n break conditionally on previous breaks $break_lookbehind\n '''\n\n @lazy\n def parser(self) -> Parser:\n parser = Parser()\n parser.gen()\n return parser\n\n def parse(self, data: str) -> AstMap:\n ast = self.parser.parse(data, 'def')\n unsafe_k(ast).must(be_right)\n return ast.value\n\n def tree(self, code: str) -> RoseAstTree:\n return ast_rose_tree(code)\n\n @lazy\n def fun_ast(self) -> AstElem:\n return self.parse(fun)\n\n @lazy\n def fun_tree(self) -> RoseAstTree:\n return ast_rose_tree(self.fun_ast)\n\n @lazy\n def broken_fun_ast(self) -> AstElem:\n return self.parse(broken_fun)\n\n @lazy\n def broken_fun_tree(self) -> RoseAstTree:\n return ast_rose_tree(self.broken_fun_ast)\n\n def range(self) -> Expectation:\n def check_node(node: AstElem) -> None:\n if not isinstance(node, AstList):\n start, end = node.range\n 
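# every non-list node's source slice, fun[start:end], should reproduce the node's own text\n                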
unsafe_k(fun[start:end]) == node.text\n self.fun_ast.foreach(check_node)\n\n def tree_lines(self) -> Expectation:\n lines = self.broken_fun_ast.lines\n return k(lines.join_lines).must(equal(broken_fun))\n\n def bols(self) -> Expectation:\n bols = self.broken_fun_ast.bols\n return k(bols) == List(0, 17, 40, 77, 96, 115, 127, 129, 131)\n\n def eols(self) -> Expectation:\n eols = self.broken_fun_ast.eols\n return k(eols) == List(16, 39, 76, 95, 114, 126, 128, 130)\n\n def bols_eols(self) -> Expectation:\n lines = Lists.lines(broken_fun)\n ast = self.broken_fun_ast\n be = ast.bols.zip(ast.eols)\n def check(be: Tuple[List[int], List[int]], line: str) -> None:\n start, end = be\n return k(line) == broken_fun[start:end]\n exps = (be.zip(lines)).map2(check)\n return exps.fold(AlgExpectation)\n\n def boundary_nodes(self) -> Expectation:\n nodes = self.broken_fun_ast.boundary_nodes\n return (\n k(nodes.s.defkw).must(be_token('def')) &\n k(nodes.s.def_.rhs.body.head.def_.def_.rhs.block.body.head.casekw).must(be_token('case')) &\n k(nodes.s.def_.rhs.rbrace.brace).must(be_token('}'))\n )\n\n def break_fun(self) -> Expectation:\n breaker = Breaker(37)\n broken = breaker.format(self.fun_ast)._value()\n return k(broken / _.join_lines).must(contain(broken_fun))\n\n def indent_broken(self) -> Expectation:\n indenter = Indenter(2)\n indented = indenter.format(self.broken_fun_ast).value\n return k(indented / _.join_lines).must(contain(formatted_fun))\n\n def break_lookbehind(self) -> Expectation:\n breaker = Breaker(12)\n broken = breaker.format(self.parse(lookbehind)).value\n return k(broken).must(contain(lookbehind_target))\n\n__all__ = ('ScalaFormatSpec',)\n","sub_path":"unit/format/scala_spec.py","file_name":"scala_spec.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"182578248","text":"#! 
/usr/bin/python2\nfrom suds.xsd.doctor import ImportDoctor, Import\nfrom suds.client import Client\nfrom tabulate import tabulate\nimport sys\n\nclass Student():\n\tdef __init__(self, name, lastname, idStudent):\n\t\tself.name = name\n\t\tself.lastname = lastname\n\t\tself.idStudent = idStudent\n\n\tdef __str__(self):\n\t\treturn \" \" + self.name + \" \" + self.lastname\n\nurl = 'http://ws.espol.edu.ec/saac/wsandroid.asmx?WSDL'\nimp = Import('http://www.w3.org/2001/XMLSchema') # the schema to import.\nimp.filter.add('http://tempuri.org/')\n\ndoctor = ImportDoctor(imp)\nclient = Client(url, doctor=doctor)\n\nname = sys.argv[1]\nlastname = sys.argv[2]\n\nData = client.service.wsConsultarPersonaPorNombres(nombre = name,apellido = lastname)\nstudents = []\ntry:\n\ti = Data[1].__getitem__(0).__getitem__(0)\n\ts=Student(i.NOMBRES, i.APELLIDOS, i.CODESTUDIANTE)\n\tgrades = client.service.wsConsultaCalificaciones(anio = sys.argv[3], termino =sys.argv[4], estudiante=i.CODESTUDIANTE)\n\tprint(\"\\n\" + str(s) + \"\\n\")\nexcept:\n\tfor i in Data[1].__getitem__(0).__getitem__(0):\n\t\ttry:\n\t\t\tstudents.append(Student(i.NOMBRES, i.APELLIDOS, i.CODESTUDIANTE))\n\t\texcept:\n\t\t\tprint (\"\")\n\tcont=1\n\tfor i in students:\n\n\t\tprint (\"\\n\" + str(cont) + str(i))\n\t\tcont+=1\n\tprint (\"\\n\")\n\n\top=input(\"Ingrese el nombre a consultar: \")\n\tgrades = client.service.wsConsultaCalificaciones(anio = sys.argv[3], termino =sys.argv[4], estudiante=students[int(op)-1].idStudent)\n\n\n\n\ngrades_table = []\ntry:\n\tcal = grades[1].__getitem__(0).__getitem__(0)\n\tgrade = []\n\tgrade.append(cal.MATERIA)\n\tgrade.append(cal.NOTA1)\n\tgrade.append(cal.NOTA2)\n\tgrade.append(cal.NOTA3)\n\tgrade.append(cal.PROMEDIO)\n\tgrade.append(cal.ESTADO)\n\tgrade.append(cal.VEZ)\n\tgrades_table.append(grade)\nexcept:\n\tfor cal in grades.diffgram.NewDataSet.CALIFICACIONES:\n\t\tgrade = []\n\t\tgrade.append(cal.MATERIA)\n\t\tgrade.append(cal.NOTA1)\n\t\tgrade.append(cal.NOTA2)\n\t\tgrade.append(cal.NOTA3)\n\t\tgrade.append(cal.PROMEDIO)\n\t\tgrade.append(cal.ESTADO)\n\t\tgrade.append(cal.VEZ)\n\t\tgrades_table.append(grade)\n\nprint (tabulate(grades_table, headers=[\"MATERIA \",\"PARCIAL \",\"FINAL \",\"MEJORAMIENTO \",\"PROMEDIO \",\"ESTADO \",\"VEZ \"],tablefmt=\"fancy_grid\"))\n","sub_path":"wsSpider.py","file_name":"wsSpider.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"6904650","text":"#!/usr/bin/env python\n#\n# ======================================================================\n#\n# Brad T. Aagaard, U.S. Geological Survey\n# Charles A. Williams, GNS Science\n# Matthew G. 
Knepley, University of Chicago\n#\n# This code was developed as part of the Computational Infrastructure\n# for Geodynamics (http://geodynamics.org).\n#\n# Copyright (c) 2010-2017 University of California, Davis\n#\n# See COPYING for license information.\n#\n# ======================================================================\n#\n\n## @file unittests/pytests/problems/TestTimeStepUser.py\n\n## @brief Unit testing of TimeStepUser object.\n\nimport unittest\nfrom pylith.problems.TimeStepUser import TimeStepUser\n\nfrom pyre.units.time import second,year\n\nstepsE = [2*1.0, 2*2.0, 2*3.0]\n\n# ----------------------------------------------------------------------\nclass Integrator:\n\n def __init__(self, dt):\n self.dt = dt\n\n\n def stableTimeStep(self, mesh):\n return self.dt\n\n\n# ----------------------------------------------------------------------\nclass TestTimeStepUser(unittest.TestCase):\n \"\"\"\n Unit testing of TimeStepUser object.\n \"\"\"\n\n def setUp(self):\n from spatialdata.units.Nondimensional import Nondimensional\n normalizer = Nondimensional()\n normalizer._configure()\n normalizer.setTimeScale(0.5*year)\n\n tstep = TimeStepUser()\n tstep._configure()\n tstep.filename = \"data/timesteps.txt\"\n tstep.preinitialize()\n tstep.initialize(normalizer)\n self.tstep = tstep\n return\n \n\n def test_constructor(self):\n \"\"\"\n Test constructor.\n \"\"\"\n tstep = TimeStepUser()\n tstep._configure()\n return\n\n\n def test_initialize(self):\n \"\"\"\n Test initialize().\n \"\"\"\n tstep = self.tstep\n\n for stepE, step in zip(stepsE, tstep.steps):\n self.assertEqual(stepE, step)\n return\n\n\n def test_numTimeSteps(self):\n \"\"\"\n Test numTimeSteps().\n \"\"\"\n tstep = self.tstep\n\n self.assertEqual(1, tstep.numTimeSteps())\n\n tstep.totalTimeN = 12.0 / 0.5 # nondimensionalize\n self.assertEqual(6, tstep.numTimeSteps())\n\n tstep.loopSteps = True\n tstep.totalTimeN = 7.0 / 0.5 # nondimensionalize\n self.assertEqual(5, tstep.numTimeSteps())\n return\n\n\n def test_timeStep(self):\n \"\"\"\n Test timeStep().\n \"\"\"\n tstep = self.tstep\n\n step1 = 1.0 / 0.5 # nondimensionalize\n step2 = 2.0 / 0.5 # nondimensionalize\n step3 = 3.0 / 0.5 # nondimensionalize\n\n integrators = [Integrator(40.0),\n Integrator(80.0)]\n\n from pylith.topology.Mesh import Mesh\n mesh = Mesh()\n\n self.assertEqual(step1, tstep.timeStep(mesh, integrators))\n self.assertEqual(step2, tstep.timeStep(mesh, integrators))\n self.assertEqual(step3, tstep.timeStep(mesh, integrators))\n self.assertEqual(step3, tstep.timeStep(mesh, integrators))\n self.assertEqual(step3, tstep.timeStep(mesh, integrators))\n\n tstep.index = 0\n tstep.loopSteps = True\n self.assertEqual(step1, tstep.timeStep(mesh, integrators))\n self.assertEqual(step2, tstep.timeStep(mesh, integrators))\n self.assertEqual(step3, tstep.timeStep(mesh, integrators))\n self.assertEqual(step1, tstep.timeStep(mesh, integrators))\n self.assertEqual(step2, tstep.timeStep(mesh, integrators))\n\n integrators = [Integrator(0.01),\n Integrator(8.0)]\n caught = False\n try:\n tstep.timeStep(mesh, integrators)\n except RuntimeError:\n caught = True\n self.failUnless(caught)\n\n return\n\n\n def test_currentStep(self):\n \"\"\"\n Test currentStep().\n \"\"\"\n tstep = self.tstep\n\n integrators = [Integrator(4.0),\n Integrator(8.0)]\n\n from pylith.topology.Mesh import Mesh\n from pylith.mpi.Communicator import petsc_comm_world\n mesh = Mesh()\n #mesh.setComm(petsc_comm_world())\n\n tstep.timeStep(mesh, integrators)\n stepE = 1.0 / 0.5 # 
Nondimensionalize\n self.assertEqual(stepE, tstep.currentStep())\n return\n\n\n def test_factory(self):\n \"\"\"\n Test factory method.\n \"\"\"\n from pylith.problems.TimeStepUser import time_step\n ts = time_step()\n return\n\n\n# End of file \n","sub_path":"unittests/pytests/problems/TestTimeStepUser.py","file_name":"TestTimeStepUser.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256817009","text":"import os\nimport glob\nimport xml.etree.ElementTree as ET\nfrom dax import spiders, XnatUtils\nfrom collections import OrderedDict\n\n\nJOB_DIR = '${temp_dir}'\nENV_SOURCE = '${env_source}'\nIN_FILE = '${t1}'\nDB_TEMPLATE = '${dbt}'\nNIFTYPIPE_EXE = '${exe}'\nOPENMP_CORE = '${omp}'\nWORKING_DIR = '${working_dir}'\nEXE_CMD = '''{exe} \\\n-i {input} \\\n-o {output} \\\n-d {db_xml} \\\n--no_qsub \\\n--n_procs 1 \\\n--remove_tmp \\\n{omp} \\\n{wdir}'''\nOMP = '''--openmp_core {number_core}'''\nWDIR = '''--working_dir '{working_dir}' '''\n\n\ndef main():\n \"\"\" Main function.\"\"\"\n if ENV_SOURCE is not None and ENV_SOURCE != 'None' \\\n and os.path.isfile(ENV_SOURCE):\n os.system('sh {}'.format(ENV_SOURCE))\n\n _working_dir = None\n if WORKING_DIR != 'None':\n _working_dir = os.path.join(WORKING_DIR, '${assessor_label}')\n if not os.path.exists(_working_dir):\n os.makedirs(_working_dir)\n\n if os.path.exists(NIFTYPIPE_EXE) or \\\n XnatUtils.executable_exists(NIFTYPIPE_EXE):\n if OPENMP_CORE is not None and OPENMP_CORE != 'None':\n _omp = OMP.format(number_core=OPENMP_CORE)\n if _working_dir is not None:\n _wd = WDIR.format(working_dir=_working_dir)\n cmd = EXE_CMD.format(exe=NIFTYPIPE_EXE,\n input=IN_FILE,\n output=JOB_DIR,\n db_xml=DB_TEMPLATE,\n omp=_omp, wdir=_wd)\n os.system(cmd)\n make_pdf()\n else:\n raise Exception(\"Error: %s not found\" % (NIFTYPIPE_EXE))\n\n\ndef make_pdf():\n \"\"\"Method to make the PDF for the spider.\n\n :return: None\n \"\"\"\n # PDF pages:\n pdf_pages = {\n '1': os.path.join(JOB_DIR, 'GIF_parcellation_page1.pdf'),\n '2': os.path.join(JOB_DIR, 'GIF_parcellation_page2.pdf')\n }\n\n # Images outputs:\n bias_corrected = glob.glob(os.path.join(JOB_DIR,\n '*bias_corrected.nii.gz'))\n brain = glob.glob(os.path.join(JOB_DIR, '*brain.nii.gz'))\n labels = glob.glob(os.path.join(JOB_DIR, '*labels.nii.gz'))\n prior = glob.glob(os.path.join(JOB_DIR, '*prior.nii.gz'))\n seg = glob.glob(os.path.join(JOB_DIR, '*seg.nii.gz'))\n tiv = glob.glob(os.path.join(JOB_DIR, '*tiv.nii.gz'))\n list_images = [bias_corrected, brain, labels, seg, tiv, prior]\n\n # Page 1:\n images = []\n for index, image_file in enumerate(list_images):\n if len(image_file) != 1:\n err = '%s output image not found or more than one file found.'\n raise Exception(err % (image_file))\n images.append(image_file[0])\n\n labels = {\n '0': 'Bias Corrected',\n '1': 'Brain',\n '2': 'Labels',\n '3': 'Segmentation',\n '4': 'tiv',\n '5': 'prior'\n }\n cmap = {\n '0': 'gray',\n '1': 'gray',\n '2': None,\n '3': 'gray',\n '4': 'gray',\n '5': None\n }\n spiders.plot_images(pdf_pages['1'], 1, images,\n 'GIF_Parcellation Pipeline',\n image_labels=labels, cmap=cmap)\n\n # Page 2\n # Volumes:\n volumes = glob.glob(os.path.join(JOB_DIR, '*volumes.xml'))\n if len(volumes) != 1:\n err = '%s output csv file with information on volumes not found \\\nor more than one file found.'\n raise Exception(err % (volumes))\n tree = ET.parse(volumes[0])\n root = tree.getroot()\n di_stats = OrderedDict()\n for tissue in 
root.findall('tissues'):\n        for item in tissue.findall('item'):\n            di_stats[item.find('name').text] = item.find('volumeProb').text\n    for tissue in root.findall('labels'):\n        for item in tissue.findall('item'):\n            di_stats[item.find('name').text] = item.find('volumeProb').text\n\n    spiders.plot_stats(pdf_pages['2'], 2, di_stats,\n                       'Volumes computed by GIF_Parcellation',\n                       columns_header=['Label Name', 'Volume ml'])\n\n    # Join the two pages for the PDF:\n    pdf_final = os.path.join(JOB_DIR, 'GIF_parcellation.pdf')\n    spiders.merge_pdfs(pdf_pages, pdf_final)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pipelines/GIF_parcellation/v3.0.0/call.py","file_name":"call.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"427207315","text":"'''\nNUM_POP = 50 # P\nNUM_EPOCH = 15000\nCROSSOVER_PROB = 0.75\nNUM_RULES = 55\nCOND_LENGTH = 6\nMUTATION_PROB = 0.02\n\n'''\n'''\n Condition Length + 1 action bit\n Rule = (010101 0)\n Num Genes = 1x Rule Length * Number of Rules to be generated to ensure\n enough genes present.\n '''\n'''\nNUM_GENE = (COND_LENGTH + 1) * NUM_RULES\n# MUTATION_PROB = random.uniform(1.0/const.NUM_GENE, 1.0/const.NUM_POP,)\n'''\nNUM_POP = 500\nNUM_EPOCH = 2500\nCROSSOVER_PROB = 0.7\nNUM_RULES = 10\nCOND_LENGTH = 12\nTRAIN_COND_LENGTH = 6\n# DS3 fitness assesses whether each gene lies within a certain range, i.e. gene1 < testGene < gene2 (so cond = 6 * 2)\nNUM_GENE = (COND_LENGTH + 1) * NUM_RULES\nMAX_FIT = 1000\nFLOAT_PRECISION = 6\nMAX_ACTION = 1\nMUTATION_PROB = 0.02\nMUTATION_AMOUNT = 0.35\n","sub_path":"const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"405331494","text":"import random\n\n\ngame = True\nlist = ['green', 'red', 'blue']\n\n\nanswer = input(\"WHAT WILL THE COLOR BE GREEN, RED, OR BLUE!?????\")\n\nnum = random.randint(0, 2)  # valid indexes into the three-color list are 0-2\n\nwhile game:\n    if list[num] == answer:\n        print(\"Congrats! You guessed correctly\")\n        print(\"SEE YA\")\n        game = False\n    else: \n        print(\"wrong answer, try again\")\n        answer = input(\"WHAT WILL THE COLOR BE GREEN, RED, OR BLUE!?????\")\n","sub_path":"pie.py","file_name":"pie.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"531556966","text":"import urllib2\nimport datetime\nfrom xml.dom import minidom\n\n#########################################\n#               UTILITIES               #\n#########################################\n\ndef listify(x):\n    \"\"\"Makes a list out of that which is not a list.\"\"\"\n    if hasattr(x, \"__iter__\") and not isinstance(x, basestring):\n        return x\n    else:\n        return [x]\n\ndef flatten(x):\n    \"\"\"A generic list-flattener.\"\"\"\n    result = []\n    for el in x:\n        if hasattr(el, \"__iter__\") and not isinstance(el, basestring):\n            result.extend(flatten(el))\n        else:\n            result.append(el)\n    return result\n\ndef query_join(x):\n    \"\"\"\n    Joins lists into strings separated by the \"pipe\" symbol, as required by the Unofficial \n    Altweeklies.com API.\n    \"\"\"\n    if hasattr(x, \"__iter__\") and not isinstance(x, basestring):\n        return '|'.join(x)\n    else:\n        return x\n\n#########################################\n#               EXCEPTIONS              #\n#########################################\n\nclass InvalidParameter(Exception):\n    \"\"\"Invalid query parameter. 
See Query.PARAMS for a list of valid parameters.\"\"\"\n pass\n\n\nclass InvalidSource(Exception):\n \"\"\"Unknown newspaper. See Connection.SOURCES for a current list of valid papers.\"\"\"\n pass\n\nclass InvalidCategory(Exception):\n \"\"\"\n Unknown category. See Connection.CATEGORIES for a dictionary of valid categories \n and subcategories.\n \"\"\"\n pass\n\nclass InvalidSubcategory(Exception):\n \"\"\"\n Unknown subcategory. See Connection.CATEGORIES for a dictionary of valid categories \n and subcategories. Otherwise, this exception is raised if a valid subcategory is not \n accompanied by its parent category in a Query object.\n \"\"\"\n pass\n\nclass SubcategoryWithoutCategory(Exception):\n \"\"\"Raised when a Query objects specifies a subcategory but no main category\"\"\" \n pass\n\n#########################################\n# CLASSES #\n#########################################\n\nclass Connection(object):\n \"\"\"\n A connection to the Unofficial Altweeklies.com API.\n\n When pyaltweeklies is first imported, Connection.SOURCES and Connections.CATEGORIES \n are populated via API call with those constants.\n \"\"\"\n\n def __repr__(self):\n return u''\n\n def request(self, query):\n \"\"\"\n Send query to the API server. Takes a Query object, returns a Response object.\n \"\"\"\n try:\n query._check()\n except:\n raise\n\n resp = urllib2.urlopen(self.BASE_URL+query._querystring)\n xml = resp.read()\n resp.close()\n return Response(xml)\n\n BASE_URL = 'http://www.northcoastjournal.com/altweeklies/'\n SOURCES = []\n CATEGORIES = {}\n\n @classmethod\n def get_sources(cls):\n \"\"\"\n Grabs a list of currently valid member paper names from the API server.\n \"\"\"\n resp = urllib2.urlopen(cls.BASE_URL+'?list_sources')\n dom = minidom.parse(resp)\n sources = [n.childNodes[0].data for n in dom.getElementsByTagName('name')]\n resp.close()\n dom.unlink()\n Connection.SOURCES = sources\n return sources\n\n @classmethod\n def get_categories(cls):\n \"\"\"\n Grabs a dictionary of currently valid categories/subcategories from the API server.\n \"\"\"\n resp = urllib2.urlopen(cls.BASE_URL+'?list_categories')\n dom = minidom.parse(resp)\n categories = {}\n category_nodes = [n for n in dom.getElementsByTagName('category')]\n for n in category_nodes:\n name = n.getElementsByTagName('name')[0].childNodes[0].data\n categories[name] = []\n for sub in n.getElementsByTagName('subcategory'):\n categories[name].append(sub.getElementsByTagName('name')[0].childNodes[0].data)\n resp.close()\n dom.unlink()\n Connection.CATEGORIES = categories\n return categories\n \nclass Query(object):\n \"\"\"\n\n Contains and constructs a query to pass to the API.\n\n Query parameters:\n\n age (integer): Maximum age stories to be returned, as measured in days from the time \n they went live on Altweeklies.com. The default is 21.\n\n num (integer): Maximum number of stories to return. The default is 10.\n\n source (string or list): Restrict search to a particular newspaper or list of newspapers. \n A current list of valid newspaper names can be found in \n Connection.SOURCES.\n\n exclude_source (string or list): Exclude a particular or newspaper list of newspapers from \n the search. A current list of valid newspaper names can \n be found in Connection.SOURCES.\n\n category (string or list): Restrict search to particular categories of content. 
A current dict \n of categories and subcategories can be found in Connection.CATEGORIES.\n\n subcategory (string or list): Restrict search to particular subcategories of content. A current \n dict of categories and subcategories can be found in \n Connection.CATEGORIES.\n\n q (string or list): Full-text search. If passed a list, the items in the list are joined together \n in an AND query.\n \"\"\"\n\n PARAMS = ('age', 'num', 'source', 'exclude_source', 'category', 'subcategory', 'q')\n\n def __init__(self, params={}):\n \"\"\"\n Initializes a Query object. Optionally takes a dict of parameters and query terms.\n\n \"\"\"\n for p in self.PARAMS:\n self.__setattr__(p, None)\n if params:\n for k, v in params.items():\n self.__setattr__(k, v)\n self._check()\n\n def __repr__(self):\n return u''\n\n def _check(self):\n \"\"\"\n Checks a Query for validity, raising an exception if query is not valid.\n \"\"\"\n query_set = set(self.__dict__.keys())\n conn_set = set(self.PARAMS)\n if not query_set.issubset(conn_set):\n raise InvalidParameter\n\n if self.source:\n source_set = set(listify(self.source))\n if not source_set.issubset(set(Connection.SOURCES)):\n raise InvalidSource\n \n if self.subcategory and not self.category:\n raise SubcategoryWithoutCategory\n\n if self.category:\n category_set = set(listify(self.category))\n if not category_set.issubset(set(Connection.CATEGORIES.keys())):\n raise InvalidCategory\n\n if self.subcategory:\n query_subcategory_set = set(listify(self.subcategory))\n available_subcategory_set = set(\n flatten([Connection.CATEGORIES[c] for c in category_set])\n )\n if not query_subcategory_set.issubset(available_subcategory_set):\n raise InvalidSubcategory\n\n @property\n def _querystring(self):\n \"\"\"\n Querystring representation of the query, ready to pass to the API.\n \"\"\"\n params = []\n for k, v in self.__dict__.items():\n if v:\n param = '='.join((k, urllib2.quote(unicode(query_join(v)))))\n params.append(param)\n querystring = '?' + '&'.join(params)\n return querystring\n\nclass Response(object):\n \"\"\"\n Simple wrapper for an API server XML response. 
Most of the action is in 'self.stories'.\n \"\"\"\n\n def __init__(self, xml):\n \"\"\"\n Takes a API XML response as an init argument and places the parsed Story objects in \n self.stories.\n \"\"\"\n self.xml = xml\n self.stories = self._make_stories()\n\n def __repr__(self):\n return u'' % self.count\n\n @property\n def count(self):\n \"\"\"\n Number of stories returned in the response.\n \"\"\"\n return len(self.stories)\n\n def _make_stories(self):\n \"\"\"\n Constructs Story objects out of an API XML reponse.\n \"\"\"\n dom = minidom.parseString(self.xml)\n nodes = dom.getElementsByTagName('story')\n stories = []\n for n in nodes:\n stories.append(Story(n))\n dom.unlink()\n return stories\n\nclass Story(object):\n \"\"\"\n Story attributes:\n\n title\n description\n link \n category\n subcategory\n published\n source\n \"\"\" \n def __init__(self, node):\n \"\"\"\n Makes a Story object out of an API XML node.\n \"\"\"\n self._parse_node(node)\n\n def __repr__(self):\n return u''.encode('utf-8') % (\n self.source.encode('utf-8'), self.title.encode('utf-8')\n )\n \n def _parse_node(self, node):\n \"\"\"\n Parses an API XML Node\n \"\"\"\n fields = [f for f in node.childNodes if f.nodeName != '#text']\n for field in fields:\n self.__setattr__(field.nodeName, field.childNodes[0].data)\n if 'published' in self.__dict__.keys():\n raw_date = self.published\n parsed_date = datetime.datetime.strptime(raw_date, '%Y-%m-%d %H:%M:%S')\n self.published = parsed_date\n\n#####################################\n# INITIALIZE #\n#####################################\n\nConnection.get_sources()\nConnection.get_categories()\n","sub_path":"altweeklies.py","file_name":"altweeklies.py","file_ext":"py","file_size_in_byte":9554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"173058312","text":"import pygame.font #这里显示文字要用到\nfrom pygame.sprite import Group #编组容器类模块\n\nfrom ship import Ship #引用了ship\n\n\nclass Scoreboard():\n '''记分板,显示得分信息的类'''\n\n def __init__(self,screen,ai_settings,stats):\n '''初始化值,各种得分相关值'''\n self.screen = screen #传参获得屏幕属性\n self.screen_rect = screen.get_rect() #获得屏幕对应的矩形 用于对齐屏幕\n self.ai_settings = ai_settings #传参获得设置的属性 一些传参\n self.stats = stats #传参获得游戏实时统计值\n\n #显示得分信息的字体设置\n self.text_color = (255,0,0) #字色,红\n self.font = pygame.font.Font('font/pingfang.ttf',36) #字体苹方36\n\n #准备得分、最高得分、等级、剩余命 图像\n self.prep_score() #调用👇下面2个渲染font\n self.prep_high_score() #显示命历史最高得分函数准备\n self.prep_level() #显示玩家等级函数准备\n self.prep_ships() #显示命数函数准备\n \n def prep_ships(self):\n '''显示剩余命(显示飞船)'''\n self.ships = Group() #编组容器\n for ship_number in range(self.stats.ships_left): #循环命数\n ship = Ship(self.ai_settings,self.screen) #建个飞船实例\n ship.image = pygame.transform.scale(ship.image,(24,24)) #太大了,缩小到24\n ship.rect.x = 10 + ship_number * 24 #命���飞船x坐标\n ship.rect.y = 10 #命数飞船y坐标\n self.ships.add(ship) #将建好小飞船实例加入ships组\n\n def prep_score(self):\n '''记分牌——得分信息渲染'''\n # ~ score_str = str(self.stats.score) #转字符串\n round_score = round(self.stats.score,-1) #保留有效数值-1位\n score_str = \"{:,}\".format(round_score) #转字符串之数字格式化 {:,}表示以逗号为分隔符的数字形式\n self.score_image = self.font.render(score_str,True,self.text_color\n ) #创建一个指定surfer并在其上绘制文字,去掉最后背景色的参数self.ai_settings.bg_color也可,默认透明\n\n #将得分放在屏幕右上角\n self.score_rect = self.score_image.get_rect() #获得渲染为图片的文字对应的矩形\n self.score_rect.right = self.screen_rect.right - 20 #位置屏幕左边空20\n self.score_rect.top =20 #位置屏幕顶部空20\n \n def prep_level(self):\n '''记玩家等级信息渲染 基本重用prep_score'''\n self.level_image = 
self.font.render('level:'+str(self.stats.level),True,\n self.text_color) #去掉最后背景色的参数,默认透明\n\n #将得分放在屏幕右上角\n self.level_rect = self.level_image.get_rect() #获得渲染为图片的文字对应的矩形\n self.level_rect.right = self.screen_rect.right - 20 #位置屏幕左边空20\n self.level_rect.top =20 + 36 #位置屏幕顶部空20+36\n \n def prep_high_score(self):\n '''将最高得分转换为渲染的图像'''\n high_socre = round(self.stats.high_score,-1) #保留有效数值-1位\n high_score_str = \"{:,}\".format(high_socre) #转字符串之数字格式化 {:,}表示以逗号为分隔符的数字形式\n high_score_str = '目前最高分'+high_score_str #加个文字提示\n self.high_score_image = self.font.render(high_score_str,True,\n self.text_color) #转换为surfer\n #将历史最高分置于屏幕顶部居中\n self.high_score_rect = self.high_score_image.get_rect() #获取对应矩形区域\n self.high_score_rect.centerx = self.screen_rect.centerx #中心x坐标对应屏幕中心x坐标\n self.high_score_rect.top = self.screen_rect.top #顶部坐标对应屏幕顶部坐标\n \n def show_score(self):\n '''在屏幕上显示渲染好的文字图'''\n self.screen.blit(self.score_image,self.score_rect) #将得分文字图绘制在其对应的rect位置上\n self.screen.blit(self.high_score_image,self.high_score_rect) #将历史最高分绘制在其对应的rect位置上\n self.screen.blit(self.level_image,self.level_rect) #将生于命数(小飞船)绘制在其对应的rect位置上\n \n #绘制剩余命(飞船)\n self.ships.draw(self.screen) \n","sub_path":"scoreboard.py","file_name":"scoreboard.py","file_ext":"py","file_size_in_byte":4582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"123107306","text":"from app.models.pet import Pet\nfrom pytz import timezone, utc\nimport logging\nfrom flask import request\nfrom app.models.dailystatistics import DailyStatistics\nfrom app.models.monthlystatistics import MonthlyStatistics\nfrom app.utils.s3 import upload_fileobj, delete_file\nfrom app import db, ma\nimport datetime\nimport uuid\n\nclass PetRecord(db.Model):\n __tablename__ = 'pet_record'\n timestamp = db.Column(db.DateTime(timezone=True), primary_key = True)\n pet_id = db.Column(db.Integer, db.ForeignKey('pet.id'), primary_key = True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n result = db.Column(db.String(250), nullable = False)\n image_uuid = db.Column(db.String(250), nullable = False)\n # (timezone=True) make DATETIME to TIMESTAMP in Mysql\n created_date = db.Column(db.DateTime(timezone=True), nullable = False, default=datetime.datetime.now())\n last_modified_date = db.Column(db.DateTime(timezone=True), nullable = False, default=datetime.datetime.now())\n\n pet = db.relationship('Pet',\n backref = db.backref('records'), lazy = True)\n\n def __repr__(self):\n return f\"\"\n\n @staticmethod\n def update_stats(pet_id: int, user_id: int, new_datetime: datetime.datetime, old_datetime: datetime.datetime) -> None:\n \"\"\"\n By new pet record, Update daily_stat and monthly_stat tables\n \"\"\"\n # update daily_stat first\n try:\n DailyStatistics.update(pet_id, user_id, new_datetime, old_datetime)\n MonthlyStatistics.update(pet_id, user_id, new_datetime, old_datetime)\n except Exception as e:\n # bubbling for transaction\n raise e\n \n def upload_record_image(self, image_file):\n \"\"\"\n Upload record image file to S3\n\n :Param image_file: image file stream\n :Return: image_uuid if file was uploaded, else None\n \"\"\"\n image_uuid = str(uuid.uuid4())\n if upload_fileobj(image_file, image_uuid):\n return image_uuid\n else:\n return None\n \n def delete_record_image(self):\n \"\"\"\n Delete record image file in S3\n\n :Return: True if successed, else False\n \"\"\"\n if delete_file(self.image_uuid):\n return True\n else:\n return False\n \n @staticmethod\n def generate_fake(id: int, 
is_today:bool=False):\n '''\n :param: user_id\n '''\n from sqlalchemy.exc import IntegrityError\n from random import seed, choice\n from faker import Faker\n\n logging.info(f'PetRecord.generate_fake: id={id}, is_today={is_today}')\n \n fake = Faker()\n today_dt_deltas = [\n # today\n datetime.timedelta(hours = 0),\n datetime.timedelta(hours = 1),\n datetime.timedelta(hours = 2),\n datetime.timedelta(hours = 3),\n ]\n past_dt_deltas = [\n # weekdays\n datetime.timedelta(days = 1),\n datetime.timedelta(days = 1, hours = 1),\n datetime.timedelta(days = 2),\n datetime.timedelta(days = 2, hours = 1),\n datetime.timedelta(days = 3),\n datetime.timedelta(days = 3, hours = 1),\n datetime.timedelta(days = 4),\n datetime.timedelta(days = 4, hours = 1),\n datetime.timedelta(days = 5),\n datetime.timedelta(days = 5, hours = 1),\n # week\n datetime.timedelta(weeks = 1),\n datetime.timedelta(weeks = 1, hours = 1),\n datetime.timedelta(weeks = 2),\n datetime.timedelta(weeks = 2, hours = 1),\n # month\n datetime.timedelta(days = 31),\n datetime.timedelta(days = 31, hours = 1),\n datetime.timedelta(days = 31 * 2),\n datetime.timedelta(days = 31 * 2, hours = 1),\n datetime.timedelta(days = 31 * 3),\n datetime.timedelta(days = 31 * 3, hours = 1),\n datetime.timedelta(days = 31 * 4),\n datetime.timedelta(days = 31 * 4, hours = 1),\n datetime.timedelta(days = 31 * 5),\n datetime.timedelta(days = 31 * 5, hours = 1),\n ]\n\n seed()\n # get random kst datetime for test\n datetime_now = utc.localize(datetime.datetime.utcnow()).astimezone(timezone('Asia/Seoul'))\n logging.info(f'datetime_now = ${datetime_now}')\n # Check pet existency\n pet = Pet.query.filter_by(user_id = id).first()\n if(pet is None):\n raise Exception(\"Fake user doesn't have pet id\")\n # Create fake records\n if(is_today):\n for i in range(len(today_dt_deltas)):\n gen_time = datetime_now - today_dt_deltas[i]\n logging.info(f'gen_time : {gen_time}')\n new_record = PetRecord(\n timestamp = gen_time,\n result = choice(['SUCCESS', 'FAIL']),\n image_uuid = '(FAKE)' + fake.uuid4(),\n\n # id param\n user_id=id,\n pet_id=pet.id\n )\n try:\n db.session.add(new_record)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n logging.error(e)\n \n # update stat tables\n PetRecord.update_stats(id, id, gen_time, gen_time)\n else:\n for i in range(len(past_dt_deltas)):\n gen_time = datetime_now - past_dt_deltas[i]\n logging.info(f'gen_time : {gen_time}')\n new_record = PetRecord(\n timestamp = gen_time,\n result = choice(['SUCCESS', 'FAIL']),\n image_uuid = '(FAKE)' + fake.uuid4(),\n\n # id param\n user_id=id,\n pet_id=pet.id\n )\n try:\n db.session.add(new_record)\n db.session.commit()\n except Exception as e:\n db.session.rollback()\n logging.error(e)\n \n # update stat tables\n PetRecord.update_stats(id, id, gen_time, gen_time)\n\n\n\n logging.info('Successed to set fake pet records')\n\n\nclass PetRecordSchema(ma.SQLAlchemyAutoSchema):\n class Meta:\n model = PetRecord\n include_fk = True\n\nclass DelPetRecordSchema(ma.SQLAlchemySchema):\n class Meta:\n model = PetRecord\n \n timestamp = ma.auto_field()\n pet_id = ma.auto_field()\n user_id = ma.auto_field()\n\nclass RecordQuerySchema(ma.Schema):\n class Meta:\n fields = [\"timestamp\"]","sub_path":"app/models/pet_record.py","file_name":"pet_record.py","file_ext":"py","file_size_in_byte":6832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"342701437","text":"from PyQt5.QtWidgets import 
(QGraphicsTextItem,QGraphicsItem,QDialog,QTextEdit,QLabel,QLineEdit,\n QDialogButtonBox,QGridLayout,QGraphicsRectItem)\nfrom PyQt5.QtGui import QPen,QColor\n\nPointSize = 10\n\nclass TextItem(QGraphicsTextItem):\n def __init__(self, text, position, scene, parent):\n super(TextItem, self).__init__(text)\n self.setFlags(QGraphicsItem.ItemIsSelectable|\n QGraphicsItem.ItemIsMovable)\n self.setPos(position)\n #self.setTransform(matrix)\n scene.clearSelection()\n scene.addItem(self)\n\n self.setParentItem(parent)\n\n def parentWidget(self):\n return self.scene().views()[0]\n\n def itemChange(self, change, variant):\n return QGraphicsTextItem.itemChange(self, change, variant)\n\nclass MarkItemDialog(QDialog):\n def __init__(self, item=None, position=None, convert_pose = None, scene=None, parent = None, markTabList = None):\n super(MarkItemDialog, self).__init__()\n\n self.item = item\n self.position = position\n self.pose = convert_pose\n self.scene = scene\n self.base = parent\n self.markTabList = markTabList\n\n if self.markTabList is not None:\n self.markTabList.registerCB(self.removeMarkPoint)\n\n self.editor = QLineEdit()\n self.editor.textChanged.connect(self.updateUi)\n\n markName = QLabel(\"标签点:\")\n markName.setBuddy(self.editor)\n\n markPose = QLabel(\"标签坐标:\")\n pose = \"({0} x {1})\\n({2},{3},{4},{5})\".format(self.pose['position']['x'], self.pose['position']['y'],\n self.pose['orientation']['x'], self.pose['orientation']['y'],\n self.pose['orientation']['z'], self.pose['orientation']['w'])\n poseLabel = QLabel(pose)\n\n self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |\n QDialogButtonBox.Cancel)\n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False) # OK 按钮默认设置为不可点击(灰色)\n self.buttonBox.accepted.connect(self.accept)\n self.buttonBox.rejected.connect(self.reject)\n\n layout = QGridLayout()\n layout.addWidget(markName, 0, 0)\n layout.addWidget(self.editor, 0, 1)\n layout.addWidget(markPose, 1, 0)\n layout.addWidget(poseLabel, 1, 1)\n layout.addWidget(self.buttonBox, 4, 1)\n self.setLayout(layout)\n\n def accept(self):\n if self.item is None:\n self.item = TextItem(\"\", self.position, self.scene, self.base)\n self.item.setPlainText(self.editor.text()) # 标签名\n self.item.setData(0, self.editor.text())\n #self.item.update()\n\n self.rect = QGraphicsRectItem()\n # 设置画笔等样式\n pen = QPen()\n pen.setWidth(2)\n pen.setColor(QColor(0, 160, 230))\n self.rect.setPen(pen)\n self.rect.setBrush(QColor(247, 160, 57))\n self.rect.setRect(-5, -5, 10, 10)\n # setPos 使用的是其 父类坐标系 的坐标,此处的父类为 pixmap 类对象,因此使用的是相对于 pixmap的坐标,而不是scene坐标\n self.rect.setPos(self.position.x(), self.position.y())\n\n self.rect.setParentItem(self.base)\n self.rect.setData(0, self.editor.text())\n\n #print(self.base.childItems())\n #for item in self.base.childItems():\n # if item.data(0) is not None:\n # print(\"==> \", item.data(0))\n\n self.markTabList.addMark(self.editor.text(), \"{0:.4f}, {1:.4f}, 0.0, {2}, {3}, {4:.4f}, {5:.4f}\".format(self.pose['position']['x'],\n self.pose['position']['y'],\n self.pose['orientation']['x'],\n self.pose['orientation']['y'],\n self.pose['orientation']['z'],\n self.pose['orientation']['w']))\n QDialog.accept(self)\n\n def updateUi(self):\n self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(\n bool(self.editor.text()))\n\n # 删除选中的标签点及其对应的信息(包括图标、描述及其在标签栏的显示信息)\n def removeMarkPoint(self, markName): # 根据标签名删除标签\n if self.scene is not None:\n for iter in self.base.childItems(): # 遍历指定的父类item的子item,查找选中的item\n if iter.data(0) == markName:\n self.scene.removeItem(iter)\n\n def 
saveMarkPoint(self): # 保存所有标签信息到文件中\n pass\n\n","sub_path":"ROSClientGui/ui/MarkItem.py","file_name":"MarkItem.py","file_ext":"py","file_size_in_byte":5006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"529268686","text":"from data_algebra.data_ops import *\n\n\ndef test_narrow():\n ops = (\n TableDescription(\n table_name=\"stocks\",\n column_names=[\"date\", \"trans\", \"symbol\", \"qty\", \"price\"],\n )\n .extend({\"cost\": \"qty * price\"})\n .select_columns([\"date\", \"cost\"])\n )\n cused = ops.columns_used()\n assert cused[\"stocks\"] == {\"date\", \"price\", \"qty\"}\n","sub_path":"tests/test_narrow.py","file_name":"test_narrow.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"351985765","text":"__author__ = 'hook'\n\nimport sys\nimport socket\nimport paramiko\nimport traceback\nimport interactive\nimport threading\n\n\n# setup login\ndef ssh_connect(hostname, port, username, password, client):\n paramiko.util.log_to_file('terminal.log')\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((hostname, port))\n except Exception as e:\n print('*** connection failed' + str(e))\n traceback.print_exc()\n sys.exit(1)\n try:\n t = paramiko.Transport(sock)\n try:\n t.start_client()\n except paramiko.SSHException:\n print('***SSH negotiation failed')\n sys.exit(1)\n if not t.is_authenticated():\n t.auth_password(username, password)\n if not t.is_authenticated():\n print('*** Authentication failed.')\n t.close()\n sys.exit(1)\n chan = t.open_session()\n chan.get_pty()\n chan.invoke_shell()\n print('*** Here wo go!\\n')\n interactive.posix_shell(chan, client)\n chan.close()\n t.close()\n except Exception as e:\n print('*** Caught exception:' + str(e.__class__) + ':' + str(e))\n traceback.print_exc()\n sys.exit(1)\n#if __name__ == \"__main__\":\n\n\ndef test(obj):\n shell = threading.Thread(target=ssh_connect, args=('198.35.44.102', 29677, 'hook', '0505', obj))\n shell.start()\n\n\n\n\n\n\n\n\n","sub_path":"terminal.py","file_name":"terminal.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"138803987","text":"import random\n\nlosowa = 1\nn = 0\nwhile losowa != 666:\n n += 1\n losowa = random.randint(1, 99999)\n print(losowa, n)\nprint(\"zajelo\", n, \"generowan nim losowa = 666\")\n# for i in range(1, 11):\n# print(i)\n","sub_path":"main/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"152687582","text":"from django.shortcuts import get_object_or_404, render, redirect, reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage, InvalidPage\nfrom django.forms import ModelForm\nfrom django.http import HttpResponse\nfrom django.conf import settings\nimport uuid\nimport os\n\nfrom .models import DelovniList, Naloga\nfrom .generatorji.latex_generator import LatexGenerator\nfrom .generatorji.obrazec_generator import ObrazecGenerator\n\ndef index(request):\n return render(request, 'landing_page.html')\n\n@login_required\ndef seznam_dokumentov(request):\n # Ker ima uporabnik lahko vecje stevilo dokumentov je smiselno 
seznam\n # razdeliti na strani. Ce je v URL dodan GET parameter stran, potem\n # uporabniku prikazemo doloceno stran, sicer pa mu prikazemo kar prvo stran\n # z dokumenti.\n trenutna_stran = request.GET.get('stran', 1)\n\n # Dobimo seznam vseh dokumentov, ki pripadajo uporabniku, uredimo jih po\n # datumu zadnje spremembe.\n delovni_listi = DelovniList.objects.filter(lastnik=request.user).order_by('-updated_at')\n paginator = Paginator(delovni_listi, settings.STEVILO_DELOVNIH_LISTOV_NA_STRAN)\n\n # Pri pridobivanju dolocene strani lahko pride do napake, ce trenutna stran\n # ni stevilka ali ce je uporabnik presegel obseg svojih dokumentov. Ce\n # uporabnik posreduje neveljavno stran mu prikazemo kar prvo stran. Ce\n # uporabnik preseze stevilo strani, mu prikazemo kar zadnjo stran.\n try:\n seznam_delovnih_listov = paginator.page(trenutna_stran)\n except PageNotAnInteger:\n seznam_delovnih_listov = paginator.page(1)\n except EmptyPage:\n seznam_delovnih_listov = paginator.page(paginator.num_pages)\n except InvalidPage:\n seznam_delovnih_listov = paginator.page(1)\n\n return render(request, 'testi/seznam_dokumentov.html', {'delovni_listi': seznam_delovnih_listov})\n\n@login_required\ndef podrobnosti_delovnega_lista(request, id_delovnega_lista: int):\n # Najprej poiscemo delovni list glede na prejet id. Ce uporabnik nima\n # pravice za ogled dokumenta vrnemo napako\n delovni_list: DelovniList = get_object_or_404(DelovniList, pk=id_delovnega_lista)\n\n if not delovni_list.lahko_vidi(request.user):\n raise PermissionDenied\n\n return render(request, 'testi/podrobnosti_dokumenta.html', {'delovni_list': delovni_list})\n\n@login_required\ndef odstranjevanje_delovnega_lista(request, id_delovnega_lista: int):\n # Najprej poiscemo delovni list glede na prejet id\n delovni_list: DelovniList = get_object_or_404(DelovniList, pk=id_delovnega_lista)\n\n # Nato preverimo ali ima uporabnik sploh pravico do urejanja dokumenta. 
Ce\n # je nima, sprozimo Exception.\n if not delovni_list.lahko_ureja(request.user):\n raise PermissionDenied\n\n # Uporabniku sporocimo, da je bil delovni list uspesno izbrisan s pomocjo\n # django.messages knjiznice\n messages.add_message(request, messages.INFO, 'Delovni list \"{}\" je bil uspešno izbrisan.'.format(delovni_list.naslov))\n\n # Delovni list dejansko izbrisemo, vse povezane naloge se izbrisejo, ker je\n # brisanje kaskadno\n delovni_list.delete()\n\n # Uporabnika preusmerimo na seznam dokumentov, kjer se mu prikaze obvestilo,\n # da je bil delovni list izbrisan\n return redirect(reverse('naloge:seznam_dokumentov'))\n\n@login_required\ndef ustvari_delovni_list(request):\n # Ustvarimo nov delovni list in uporabnika preusmerimo na stran za urejanje\n # tega delovnega lista\n nov_delovni_list = DelovniList.prazen_dokument(request.user)\n nov_delovni_list.save()\n return redirect(reverse('naloge:urejanje_delovnega_lista', kwargs={'id_delovnega_lista' : nov_delovni_list.id }))\n\nclass NalogaForm(ModelForm):\n class Meta:\n model = Naloga\n fields = ['generator', 'stevilo_primerov', 'navodila']\n\n@login_required\ndef dodaj_nalogo(request, id_delovnega_lista: int):\n # Edina dovoljena metoda za dodajanje naloge je POST\n if request.method != 'POST':\n return HttpResponse(status=400)\n \n # Najprej poiscemo delovni list glede na prejet id\n delovni_list: DelovniList = get_object_or_404(DelovniList, pk=id_delovnega_lista)\n \n # Preverimo ali ima uporabnik urejevalni dostop do delovnega lista.\n if not delovni_list.lahko_ureja(request.user):\n raise PermissionDenied\n \n # Glede na prejete POST podatke zapolnimo Naloga form. Ce je ta veljavna, ji\n # dodamo se delovni list in podatke shranimo.\n naloga_form: NalogaForm = NalogaForm(request.POST)\n if naloga_form.is_valid():\n naloga = naloga_form.save(commit=False)\n naloga.delovni_list = delovni_list\n naloga.save()\n return render(request, 'naloge/naloga.html', { 'naloga': naloga })\n \n return HttpResponse(status=400)\n\n@login_required\ndef uredi_nalogo(request):\n\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n # Pri posiljanju forme za urejanje naloge se na streznik posreduje id\n # naloge, ki jo zeli uporabnik urediti. Ce id naloge ni posredovan na\n # streznik vrnemo napako.\n naloga_id = request.POST.get('naloga_id', None)\n if naloga_id is None:\n return HttpResponse(status=400)\n\n # Preverimo ali obstaja naloga z iskanim id v bazi podatkov\n try:\n naloga = Naloga.objects.get(pk=naloga_id)\n except Exception:\n return HttpResponse(status=400)\n \n # Preverimo ali ima trenutno prijavljeni uporabnik sploh pravico urejati\n # nalogo. Ker je vsaka naloga le na ENEM delovnem listu, lahko preverimo ali\n # ima uporabnik sploh pravico urejati delovni list.\n if not naloga.delovni_list.lahko_ureja(request.user):\n raise PermissionDenied\n\n # Ko enkrat najdemo nalogo, iz requesta najdemo se akcijo, ki jo zeli\n # uporabnik izvesti. 
Ta je lahko ena izmed naslednjih moznosti:\n # * odstrani_nalogo - izbrisi nalogo iz delovnega lista\n # * premakni_gor - premakni nalogo eno mesto navzgor v delovnem listu\n # * premakni_dol - premakni nalogo eno mesto navzdol v delovnem listu\n # * ponovno_generiraj - ponovno generiraj primere naloge\n # * dodaj_primer - dodaj en primer k nalogi\n # * odstrani_primer - odstrani primer z indeksom i iz naloge\n # * uredi_nalogo - sprejmi podatke obrazca za urejanje naloge, spremeni\n # podatke naloge in ponovno generiraj nalogo\n action = request.POST.get('action', None)\n if action is None:\n return HttpResponse(status=400)\n\n # Glede na prejeto akcijo izvedi ustrezno dejanje in podatke shrani v bazo\n if action == 'odstrani_nalogo':\n naloga.delete()\n elif action == 'premakni_gor':\n naloga.premakni_gor()\n elif action == 'premakni_dol':\n naloga.premakni_dol()\n elif action == 'ponovno_generiraj':\n naloga.ponovno_generiraj()\n return render(request, 'naloge/naloga.html', { 'naloga': naloga })\n elif action == 'dodaj_primer':\n naloga.dodaj_primer()\n return render(request, 'naloge/naloga.html', { 'naloga': naloga })\n elif action == 'odstrani_primer':\n # Odstranjevanje primera zahteva se dodaten parameter indeks\n indeks = request.POST.get('indeks', '')\n try:\n indeks = int(indeks)\n except Exception:\n return HttpResponse(status=400)\n \n naloga.odstrani_primer(indeks)\n return render(request, 'naloge/naloga.html', { 'naloga': naloga })\n elif action == 'uredi_nalogo':\n obrazec = ObrazecGenerator.generiraj_obrazec(naloga, request)\n if obrazec.is_valid():\n naloga.posodobi_podatke(obrazec.cleaned_data)\n return render(request, 'naloge/naloga.html', { 'naloga': naloga })\n \n return HttpResponse(status=200)\n\nclass DelovniListForm(ModelForm):\n class Meta:\n model = DelovniList\n fields = ['naslov', 'opis']\n\n@login_required\ndef urejanje_delovnega_lista(request, id_delovnega_lista: int):\n delovni_list: DelovniList = get_object_or_404(DelovniList, pk=id_delovnega_lista)\n\n if not delovni_list.lahko_ureja(request.user):\n raise PermissionDenied\n\n if request.method == 'POST':\n # Ce je uporabnik izpolnil formo za urejanje delovnega lista, posodobimo\n # podatke delovnega lista. Pri tem pa moramo paziti, da ne klicemo\n # neposredno delovni_list_form.save(), saj ima nas obrazec za urejanje\n # delovnega lista le dve polji - naslov in opis, namesto vseh zahtevanih\n # polj obrazca. 
Tako posodobimo le ustrezna polja v bazi.\n delovni_list_form: DelovniListForm = DelovniListForm(request.POST)\n if delovni_list_form.is_valid():\n delovni_list.naslov = delovni_list_form.cleaned_data['naslov']\n delovni_list.opis = delovni_list_form.cleaned_data['opis']\n delovni_list.save()\n return redirect(reverse('naloge:podrobnosti_delovnega_lista', kwargs={'id_delovnega_lista' : delovni_list.id }))\n \n # Ker zelimo uporabniku ob izbiri tipa naloge v spustnem seznamu ob\n # dodajanju naloge ponuditi privzeta navodila, sestavimo JSON objekt z\n # navodili vseh nalog, ki jih v dokumentu prikazemo z uporabo JavaScript.\n navodila = {}\n for generator, generator_razred in Naloga.GENERATOR_DICT.items():\n navodila[generator] = generator_razred.NAVODILA\n \n delovni_list_form: DelovniListForm = DelovniListForm(instance=delovni_list)\n naloga_form: NalogaForm = NalogaForm(initial={ 'stevilo_primerov': 4 })\n return render(request, 'testi/urejanje_dokumenta.html', {\n 'delovni_list': delovni_list,\n 'naloga_form': naloga_form,\n 'delovni_list_form': delovni_list_form,\n 'navodila': navodila\n })\n\n@login_required\ndef generiraj_delovni_list(request, id_delovnega_lista: int):\n delovni_list: DelovniList = get_object_or_404(DelovniList, pk=id_delovnega_lista)\n\n if not delovni_list.lahko_vidi(request.user):\n raise PermissionDenied\n\n # Pri generiranju pdf dokumenta najprej ustvarimo \"unikatno\" (uuid4 sicer v\n # teoriji ni ravno unikaten, v praksi pa naceloma je) ime. To uporabimo za\n # shranjevanje pdf dokumentov na disk.\n random_name = uuid.uuid4()\n\n # Sestavimo pot do datoteke na disku kamor bomo shranili pdf dokument.\n ime_datoteke = os.path.join(settings.MEDIA_ROOT, str(random_name))\n\n # S pomocjo LaTeX generatorja zgeneriramo delovni list in ga shranimo\n dokument = LatexGenerator.generiraj_latex_dokument(delovni_list)\n dokument.generate_pdf(ime_datoteke, clean=True, clean_tex=True)\n\n # Uporabnika preusmerimo na URL za dostop do pdf dokumenta\n return redirect(settings.MEDIA_URL + str(random_name) + '.pdf')","sub_path":"nadlogar/naloge/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"538469502","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : Thu Jan 3 14:06:28 2019\n# @Author : JRP - Ruipeng Jia\n\n# from pytorch_pretrained_bert import BertModel, GPT2Model\nfrom pytorch_pretrained_bert import BertModel\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nfrom config import opt\nfrom utils.utils_embedding import build_embedding_matrix\nfrom utils.utils import init_linear_orthogonal_, init_rnn_orthogonal_\n\n\nclass EncoderGRU(nn.Module):\n\n def __init__(self, args):\n super(EncoderGRU, self).__init__()\n self.args = args\n if self.args.word_vec is not None:\n embedding_matrix = build_embedding_matrix(self.args)\n self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix).float(), freeze=self.args.freeze) # .float() is important because rnn prefer to float\n elif self.args.bert:\n model = BertModel.from_pretrained(opt.home + '/datasets/WordVec/pytorch_pretrained_bert/bert-base-uncased/').to(opt.device)\n self.embed = lambda x: model(x)[0][-1]\n # elif self.args.gpt2:\n # model = GPT2Model.from_pretrained(opt.home + 'datasets/WordVec/pytorch_pretrained_bert/gpt2/')\n # self.embed = lambda x: model(x)[0]\n else:\n 
self.embed = nn.Embedding(self.args.max_vocab_size, self.args.embed_dim)\n self.gru = nn.GRU(self.args.embed_dim, self.args.hidden_dim, bidirectional=True)\n\n ## use reduce for bidirectional\n self.reduce_output = nn.Linear(self.args.hidden_dim * 2, self.args.hidden_dim)\n self.reduce_hidden = nn.Linear(self.args.hidden_dim * 2, self.args.hidden_dim)\n\n if self.args.orthogonal:\n init_linear_orthogonal_(self.reduce_hidden)\n init_linear_orthogonal_(self.reduce_output)\n init_rnn_orthogonal_(self.gru)\n\n def forward(self, input, input_lens):\n # input: (B, L)\n # input_lens: (B), should be in descending order\n input = input.t()\n input = self.embed(input) # (L, B) -> (L, B, D)\n\n packed = pack_padded_sequence(input, input_lens)\n output, hidden = self.gru(packed) # (2, B, H)\n\n ## process output && hidden\n output, _ = pad_packed_sequence(output) # (L, B, 2*H)\n output = output.transpose(0, 1)\n output = self.reduce_output(output) # (B, L, H)\n hidden = F.relu(self.reduce_hidden(hidden.view(1, -1, self.args.hidden_dim * 2))) # (1, B, H)\n # hidden = output[:, -1, :].unsqueeze(0) # error with not contiguous...\n\n return output, hidden # (B, L, H), (1, B, H)\n","sub_path":"bin/template/src/jptproject/l5_2018_12_Pytorch_Summarization_with_Pointer-Generator_Networks/layers/encoder_gru.py","file_name":"encoder_gru.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"395688910","text":"#\n# Copyright 2014 NEC Corporation. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\n\nimport six\nfrom stevedore import extension\n\nfrom ceilometer.central import plugin\nfrom ceilometer import sample\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass _Base(plugin.CentralPollster):\n\n    NAMESPACE = 'network.statistics.drivers'\n    extension_manager = extension.ExtensionManager(namespace=NAMESPACE,\n                                                   invoke_on_load=True)\n\n    @abc.abstractproperty\n    def meter_name(self):\n        '''Return a Meter Name.'''\n\n    @abc.abstractproperty\n    def meter_type(self):\n        '''Return a Meter Type.'''\n\n    @abc.abstractproperty\n    def meter_unit(self):\n        '''Return a Meter Unit.'''\n\n    def get_samples(self, manager, cache, resources=[]):\n        for resource in resources:\n            sample_data = self.extension_manager.map_method('get_sample_data',\n                                                            self.meter_name,\n                                                            resource,\n                                                            cache)\n            for data in sample_data:\n                if data is None:\n                    continue\n                if not isinstance(data, list):\n                    data = [data]\n                for (volume, resource_id,\n                     resource_metadata, timestamp) in data:\n\n                    yield sample.Sample(\n                        name=self.meter_name,\n                        type=self.meter_type,\n                        unit=self.meter_unit,\n                        volume=volume,\n                        user_id=None,\n                        project_id=None,\n                        resource_id=resource_id,\n                        timestamp=timestamp,\n                        resource_metadata=resource_metadata\n                    )\n","sub_path":"ceilometer/network/statistics/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"604560852","text":"import numpy as np\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.keras.models import load_model\n\nmodel = load_model('my_model.h5')\n\nimage = cv2.imread('image/path', cv2.IMREAD_COLOR)/255\n\n(sx, sy, ch) = image.shape\nmodel_input = (64, 64, 3)\n\ncell_size = 256\n\n# Crop the image so both sides are multiples of cell_size\nre_x = sx % cell_size\nre_y = sy % cell_size\nimage = image[:(sx-re_x), :(sy-re_y)]\n\nstep_x, step_y = image.shape[0]//cell_size, image.shape[1]//cell_size\n\npred_list = []\nacc = []\n\nglobal prediction\n\nfor y in range(0, image.shape[1], cell_size):\n    for x in range(0, image.shape[0], cell_size):\n        sub_im = cv2.resize(image[x:x+cell_size, y:y+cell_size, :], (64, 64), cv2.INTER_LINEAR)\n        sub_im = sub_im[np.newaxis, :, :, :]\n        prediction = model.predict(sub_im)\n        score = prediction[0, np.argmax(prediction)]\n        pred = np.argmax(prediction)\n        acc.append(score)\n        pred_list.append(pred)\n\n\npred_list = np.array(pred_list).reshape(step_x, step_y)\nacc = np.array(acc).reshape(step_x, step_y)\n","sub_path":"obj-detection.py","file_name":"obj-detection.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"218998110","text":"from lda_cocurrence.cocurrenceInsight import KeyWord\nfrom tqdm import tqdm\nimport joblib\nimport os\n\n# The word co-occurrence experiment needs a word -> frequency map and, for every qq user, the list of all keywords attached to that user\ndef cocurrence_process(name,redo = False):\n    if ( not redo ) and os.path.exists('./temp/{}-vocab.job'.format(name)):\n        return\n    filename = './files/{}-qqgroup'.format(name)\n    data = open(filename, 'r', encoding='utf-8').readlines()\n    qq2keywords = []\n    vocab = {}\n    for oneline in tqdm(data):\n        onelines = oneline.split('#')\n        #qq = onelines[0]\n        # some users have no keywords at all\n        if (onelines[1].strip() == ''):\n            continue\n        words = onelines[1].split(',')\n        wordlst = []\n        # The co-occurrence itself is computed in the main function: storing the co-occurrence relations in a data structure first and dumping them with joblib is too slow, so it is cheaper to compute them directly\n        for w in words:\n            if (w.strip() == ''):\n                continue\n            w1 = w.split('/')[0]\n            wordlst.append(w1)\n            if w1 not in vocab.keys():\n                vocab[w1] = KeyWord(w1)\n            vocab[w1].count += 1\n        qq2keywords.append(wordlst)\n    joblib.dump(vocab,'./temp/{}-vocab.job'.format(name))\n    joblib.dump(qq2keywords,'./temp/{}-qq2keywords.job'.format(name))\n\n\n# For LDA the low-frequency words have to be removed (high-frequency words can be removed as well); then prepare the word -> id mapping and the one-hot representation of the documents\ndef lda_process(name,redo = False):\n    if ( not redo ) and os.path.exists('./temp/{}-word2idx.job'.format(name)):\n        return\n    filename = './files/{}-qqgroup'.format(name)\n    data = open(filename, 'r', encoding='utf-8').readlines()\n    qq2keywords = []\n    vocab = {}\n    # count word frequencies and build the qq -> keyword mapping\n    for oneline in tqdm(data):\n        onelines = oneline.split('#')\n        # drop users without any keywords\n        if (onelines[1].strip() == ''):\n            continue\n        words = onelines[1].split(',')\n        wordlst = []\n        for w in words:\n            w1 = w.split('/')[0]\n            wordlst.append(w1)\n            if w1 not in vocab.keys():\n                vocab[w1] = 1\n            else:\n                vocab[w1] += 1\n\n        qq2keywords.append(wordlst)\n    id = 0\n    vocab_after_pre = {}\n    #l = sorted(vocab.items(),reverse = True,key = lambda x:x[1])\n    for x, y in vocab.items():\n        if y > 2:\n            vocab_after_pre[x] = id\n            id += 1\n\n    joblib.dump(vocab_after_pre, './temp/{}-word2idx.job'.format(name))\n    joblib.dump(qq2keywords, './temp/{}-wordslst.job'.format(name))","sub_path":"lda_cocurrence/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"347966788","text":"\n\nfrom xai.brain.wordbase.nouns._kilometre import _KILOMETRE\n\n#class header\nclass _KILOMETRES(_KILOMETRE, ):\n\tdef __init__(self,): \n\t\t_KILOMETRE.__init__(self)\n\t\tself.name = \"KILOMETRES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"kilometre\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_kilometres.py","file_name":"_kilometres.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"616852458","text":"\"\"\"Functions to read from database tables used to log an optimization.\"\"\"\nimport io\nimport traceback\nimport warnings\n\nimport pandas as pd\nfrom sqlalchemy.sql.sqltypes import BLOB\n\n\ndef read_last_iterations(database, tables, n, return_type):\n    \"\"\"Read the last n iterations from all tables.\n\n    If a table has fewer than n observations, all observations are returned.\n\n    Args:\n        database (sqlalchemy.MetaData)\n        tables (list): List of table names.\n        n (int): number of rows to retrieve\n        return_type (str): one of \"list\", \"pandas\", \"bokeh\"\n            - \"list\": A list of lists. The first sublist are the columns. The remaining\n              sublists are retrieved rows.\n            - \"pandas\": A dataframe.\n            - \"bokeh\": A dictionary that can be used to stream to a ColumnDataSource.\n              It has one key per column and the corresponding values are lists that\n              contain the data of that column.\n\n    Returns:\n        result (dict or return_type):\n            If ``tables`` has only one entry, return the last iterations of that table,\n            converted to return_type. If ``tables`` has several entries, return a\n            dictionary with one entry per table.\n\n    \"\"\"\n    if isinstance(tables, (str, int)):\n        tables = [tables]\n    # sqlalchemy fails silently with many numpy integer types, e.g. np.int64.\n    n = int(n)\n\n    selects = []\n    for table in tables:\n        tab = database.tables[table]\n        sel = tab.select().order_by(tab.c.iteration.desc()).limit(n)\n        selects.append(sel)\n\n    raw_results = _execute_select_statements(selects, database)\n    ordered_results = [res[::-1] for res in raw_results]\n\n    result = _process_selection_result(database, tables, ordered_results, return_type)\n    return result\n\n\ndef read_new_iterations(database, tables, last_retrieved, return_type, limit=None):\n    \"\"\"Read all iterations after last_retrieved.\n\n    Args:\n        database (sqlalchemy.MetaData)\n        tables (list): List of table names.\n        last_retrieved (int): The last iteration that was retrieved.\n        return_type (str): one of \"list\", \"pandas\", \"bokeh\"\n        limit (int): Only the first ``limit`` rows will be retrieved. Default None.\n\n    Returns:\n        result (dict or return_type):\n            If ``tables`` has only one entry, return the last iterations of that table,\n            converted to return_type. If ``tables`` has several entries, return a\n            dictionary with one entry per table.\n        int: The new last_retrieved value.\n\n    \"\"\"\n    if isinstance(tables, (str, int)):\n        tables = [tables]\n    # sqlalchemy fails silently with many numpy integer types, e.g. np.int64.\n    last_retrieved = int(last_retrieved)\n    # limit defaults to None, which must not be passed through int()\n    limit = int(limit) if limit is not None else None\n\n    selects = []\n    for table in tables:\n        tab = database.tables[table]\n        sel = tab.select().where(tab.c.iteration > last_retrieved).limit(limit)\n        selects.append(sel)\n\n    raw_results = _execute_select_statements(selects, database)\n    if len(raw_results[0]) > 0:\n        new_last = raw_results[0][-1][0]\n    else:\n        new_last = last_retrieved\n    result = _process_selection_result(database, tables, raw_results, return_type)\n    return result, new_last\n\n\ndef read_scalar_field(database, table):\n    \"\"\"Read the value of a table with one row and one column called \"value\".\n\n    Args:\n        database (sqlalchemy.MetaData)\n        table (str): Name of the table.\n\n    \"\"\"\n    sel = database.tables[table].select()\n    res = _execute_select_statements(sel, database)[0][0][0]\n    if isinstance(database.tables[table].c.value.type, BLOB):\n        res = pd.read_pickle(io.BytesIO(res), compression=None)\n    return res\n\n\ndef _execute_select_statements(statements, database):\n    \"\"\"Execute a list of select statements in one atomic transaction.\n\n    If any statement fails, the transaction is rolled back, and a warning is issued.\n\n    Args:\n        statements (list or sqlalchemy statement): List of sqlalchemy select statements.\n        database (sqlalchemy.MetaData): The bind argument must be set.\n\n\n    Returns:\n        result (list): List of selection results. A selection result is a list of\n            tuples where each tuple is a selected row.\n\n    \"\"\"\n    if not isinstance(statements, (list, tuple)):\n        statements = [statements]\n\n    results = []\n    engine = database.bind\n    conn = engine.connect()\n    # acquire lock\n    trans = conn.begin()\n    try:\n        for stat in statements:\n            res = conn.execute(stat)\n            results.append(list(res))\n            res.close()\n        # release lock\n        trans.commit()\n        conn.close()\n    except (KeyboardInterrupt, SystemExit):\n        trans.rollback()\n        conn.close()\n        raise\n    except Exception:\n        exception_info = traceback.format_exc()\n        warnings.warn(\n            \"Unable to read from database. Try again later. 
The traceback was:\\n\\n\"\n            f\"{exception_info}\"\n        )\n\n        trans.rollback()\n        conn.close()\n        results = [[] for stat in statements]\n\n    return results\n\n\ndef _transpose_nested_list(nested_list):\n    \"\"\"Transpose a list of lists.\"\"\"\n    return list(map(list, zip(*nested_list)))\n\n\ndef _process_selection_result(database, tables, raw_results, return_type):\n    \"\"\"Convert sqlalchemy selection results to desired return_type.\"\"\"\n    result = {}\n    for table, raw_res in zip(tables, raw_results):\n        columns = database.tables[table].columns.keys()\n        if return_type == \"list\":\n            res = [columns]\n            for row in raw_res:\n                res.append(list(row))\n        elif return_type == \"bokeh\":\n            res = dict(zip(columns, _transpose_nested_list(raw_res)))\n            if res == {}:\n                res = {col: [] for col in columns}\n        elif return_type == \"pandas\":\n            res = pd.DataFrame(data=raw_res, columns=columns).set_index(\"iteration\")\n        result[table] = res\n\n    if len(tables) == 1:\n        result = list(result.values())[0]\n    return result\n","sub_path":"estimagic/logging/read_database.py","file_name":"read_database.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"244625576","text":"def selection_sort(ulist):\n    length = len(ulist)\n\n    for i in range(length-1):\n        k = i\n        for j in range(i+1, length):\n            if ulist[j] < ulist[k]:\n                k = j\n        ulist[i], ulist[k] = ulist[k], ulist[i]\n\n    return ulist\n\n\n# 4 n-1\n# 5 n-1\n# 6 (n(n-1)) / 2\n# 7 ((n(n-1)) / 2) + 1\n# 8 ((n(n-1)) / 2) + 1\n# 9 n-1\n\n# O(n^2) because of lines 6, 7 and 8\n","sub_path":"clrs/two_two_two.py","file_name":"two_two_two.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"211403708","text":"# -*- coding: utf-8 -*- \nimport MeCab, csv, os\nimport glob, pandas as pd, numpy as np\nm = MeCab.Tagger('-d /usr/local/lib/mecab/dic/mecab-ko-dic')\n#os.chdir(\"/Users/noyeongdan/data\")\n\ncategory_, date_, content_ = [], [], []\nfile_name = ['Article_경제','Article_사회','Article_생활문화','Article_세계','Article_정치','Article_IT과학']\n\n'''def register_dic(f_name):\n    files = glob.glob(root_dir+\"/\"+f_name+\"/*.csv\", recursive=True)\n    for i in files:\n        file_to_ids(f_name)'''\n\n\ndef file_to_ids(fname):\n    #name = fname.split(\".csv\")[0]\n    #name_array = name.split('/')\n    #f_name = name_array[3] # file name\n    \n    # tags for tokenizer\n    #tag_classes = ['NNG', 'NNP','VA', 'VV+EC', 'XSV+EP', 'XSV+EF', 'XSV+EC', 'VV+ETM', 'MAG', 'MAJ', 'NP', 'NNBC', 'IC', 'XR', 'VA+EC']\n    tag_classes = ['NNG', 'NNP','VA', 'VV+EC', 'VV+ETM', 'MAJ', 'XR', 'VA+EC']\n    # read the data\n    data = pd.read_csv(fname+'.csv')\n    # pick out the title, date and body columns\n    title = data.iloc[:,3].values\n    date = data.iloc[:, 0].values\n    content = data.iloc[:, 4].values\n\n    for cnt, value in enumerate(title):\n        result = ''\n        value = m.parseToNode(str(title[cnt]).strip() + str(content[cnt]).strip())\n        while value:\n            tag = value.feature.split(\",\")[0]\n            word = value.feature.split(\",\")[3]\n            if tag in tag_classes:\n                if word == \"*\":\n                    # skip nodes without a dictionary form instead of appending the placeholder\n                    value = value.next\n                    continue\n                result += word.strip()+\" \"\n            value = value.next\n        content_.append(result)\n        date_.append(date[cnt])\n        # category\n        if '경제' in fname : category_.append(\"0\")\n        if '사회' in fname : category_.append(\"1\")\n        if '생활문화' in fname : category_.append(\"2\")\n        if '세계' in fname : category_.append(\"3\")\n        if '정치' in fname : category_.append(\"4\")\n        if 'IT과학' in fname : category_.append(\"5\")\n\ndef save(month, file_path, f_name):\n    if not os.path.exists(file_path):\n        os.mkdir(file_path)\n    with open(file_path+\"/\"+f_name+'_after_prepro.csv', 'a') as f:\n        writer = csv.writer(f)\n        \n        # every file name maps to exactly one category code, so a single\n        # lookup replaces the six identical elif branches\n        category_code = {'Article_경제': '0', 'Article_사회': '1', 'Article_생활문화': '2',\n                         'Article_세계': '3', 'Article_정치': '4', 'Article_IT과학': '5'}.get(f_name)\n        for cnt, i in enumerate(content_):\n            if category_code is not None and category_[cnt] == category_code:\n                writer.writerow((date_[cnt], content_[cnt], category_[cnt]))\n        #for cnt, i in enumerate(content_):\n        #    print(category_[cnt])\n        #    if category_[cnt]==0:\n        #        print('int')\n        #    elif category_[cnt]=='0':\n        #        print('string')\n\nfor name in file_name:\n    file_to_ids(name)\n    #register_dic(name)\n    save(name, './data_after_preprocessing_data', name)\nprint(\"형태소 분석완료!\")\n\nimport os\nimport csv\n \nos.chdir(\"./data_after_preprocessing_data\") # set this to the directory that holds the CSV files\n \ncategory = ['경제','사회','생활문화','세계','정치','IT과학']\n \nfile_unity = open('after_prepro.csv', 'w')\nwcsv = csv.writer(file_unity)\n \nfor category_element in category:\n    file = open('Article_'+category_element+'_after_prepro.csv', 'r')\n    line = csv.reader(file)\n    print(file)\n    try:\n        for line_text in line:\n            wcsv.writerow([line_text[0], line_text[1], line_text[2]])\n    except:\n        pass\nprint(\"파일 합치기 완료!\")","sub_path":"AISpring2/src/main/webapp/WEB-INF/views/Mecab.py","file_name":"Mecab.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"583576833","text":"__author__ = 'mms'\n\nfrom TwitterSearch import *\nfrom app import app\nimport tweepy\n\n\ndef search(query='cheeky nandos ledge banter', max=5):\n    keywords = query.split()\n    try:\n        tso = TwitterSearchOrder()\n        tso.set_keywords(keywords)\n        # tso.set_language('en')\n        # tso.set_include_entities(False)\n\n        ts = TwitterSearch(\n            consumer_key=app.config['TWITTER_CONSUMER_KEY'],\n            consumer_secret=app.config['TWITTER_CONSUMER_SECRET'],\n            access_token=app.config['TWITTER_ACCESS_TOKEN'],\n            access_token_secret=app.config['TWITTER_TOKEN_SECRET']\n        )\n        results = []\n        for tweet in ts.search_tweets_iterable(tso):\n            results.append(tweet['id'])\n            # print( '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] ) )\n            max -= 1\n            if not max: break\n        # print results\n        return results\n\n    except TwitterSearchException as e: # take care of all those ugly errors if there are some\n        print(e)\n\n\ndef post(status='New status'):\n    auth = tweepy.OAuthHandler(app.config['TWITTER_CONSUMER_KEY'], app.config['TWITTER_CONSUMER_SECRET'])\n    auth.set_access_token(app.config['TWITTER_ACCESS_TOKEN'], app.config['TWITTER_TOKEN_SECRET'])\n    twitter = tweepy.API(auth)\n    
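# publish the status update through the authenticated tweepy API client\n    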
twitter.update_status(status=status)\n","sub_path":"libs/tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"482968960","text":"x=lambda a,b:a*b\nprint(x(6,6))\n\nx=lambda a,b,c,d:a+b+c+d\nprint(x(3,3,8,6))\n\nmylist=[1,2,3,4,5,6,7,8,9]\nnewlist=list(filter(lambda x:(x%2==0),mylist))\nprint(newlist)\n\nmylist=[1,2,3,4,5,6,7,8,9]\nnewlist=list(filter(lambda x:(x%2!=0),mylist))\nprint(newlist)\n\n\nmylist=[1,3,6,7]\nnewlist=list(map(lambda x:(x*2),mylist))\nprint(newlist)\n\nfrom functools import reduce\ns=reduce(lambda a,b:(a*b),mylist)\nprint(s)","sub_path":"49lambda.py","file_name":"49lambda.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"412377694","text":"#!/usr/bin/python\n\n'''\n\nMain module. It launches all three threads (GUI, buffer consumer and producer).\n\nCreated on 01/08/2016\n\n@author: gciotto\n'''\n\nfrom ClientInterface import ClientInterface\nfrom PyQt4.QtGui import QApplication\nimport sys\nfrom BufferController import BufferController, Command\nimport threading\nimport time\nfrom Control_Node import Network_Nodes\n\n# This thread produces READ commands and enqueues them in the command queue (refer to the BufferController\n# module) every 'refresh_delay' seconds.\ndef reading_command_thread():\n \n while w_command.still_on:\n \n if w_command.should_monitor:\n \n for __node in Network_Nodes.nodes:\n b_controller.enqueue_command(Command(Command.READ, __node))\n \n \n time.sleep(w_command.refresh_delay)\n\n# Main 'function': instantiates a ClientInterface object and starts it. \nif __name__ == '__main__':\n \n reader = threading.Thread(target = reading_command_thread)\n reader.setDaemon(True)\n \n app = QApplication(sys.argv)\n\n w_command = ClientInterface()\n \n b_controller = BufferController(window_controller = w_command)\n b_controller.setDaemon(True)\n \n w_command.buffer_controller = b_controller \n \n w_command.show() \n \n b_controller.start()\n reader.start()\n \n sys.exit(app.exec_())\n","sub_path":"CNPEM - PROSAC Daemon Client/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449474263","text":"from __future__ import print_function\n\nfrom PTBTextReader import PTBTextReader\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\n\nclass PTBLanguageModel:\n n_steps = 32 # max sentence length\n n_input = 4096 # max dictionary size\n n_hidden = 128 # hidden layer num of features\n\n def __init__(self,learning_rate):\n self.reader = PTBTextReader('./ptb.valid.txt',self.n_steps,self.n_input)\n self.n_input = len(self.reader.words)\n\n self.x = tf.placeholder(\"float\", [None, self.n_steps, self.n_input])\n self.y = tf.placeholder(\"float\", [None, self.n_steps, self.n_input])\n\n self.outputs = self.setupRNN()\n self.optimizer,self.cost = self.setupLoss(learning_rate)\n\n def trainRNN(self,training_iters,batch_size):\n init = tf.global_variables_initializer()\n self.sess = tf.Session()\n self.sess.run(init)\n step = 0\n while step < training_iters:\n batch_x,batch_y = self.reader.getNextSentenceBatch(batch_size)\n self.sess.run(self.optimizer, feed_dict={self.x: batch_x, self.y: batch_y})\n loss = self.sess.run(self.cost, feed_dict={self.x: batch_x, self.y: batch_y})\n print(\"Iter \" + str(step) + \", 
Minibatch Loss= \" + \"{:.6f}\".format(loss))\n step += 1\n print(\"Optimization Finished!\")\n\n def testRNN(self,word):\n vec = self.reader.getWordVector(word)\n seq = [vec]\n for itr in range(1,self.n_steps):\n seq.append([0.0]*self.n_input)\n for itr in range(self.n_steps-1):\n result = self.sess.run(self.outputs, feed_dict={self.x:[seq]})\n alternative = result[0]\n seq[itr+1] = alternative[itr]\n line = ''\n for itr in range(len(seq)):\n line = line + \" \" + self.reader.getWordWithMaxProb(seq[itr])\n print(\"Generated Sequence : \"+line)\n\n def setupRNN(self):\n x = tf.unstack(self.x, self.n_steps, 1)\n lstm_cell = rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0)\n outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)\n ret = []\n weights = tf.Variable(tf.random_normal([self.n_hidden, self.n_input]))\n biases = tf.Variable(tf.random_normal([self.n_input]))\n for itr1 in range(self.n_steps):\n ret.append( tf.nn.softmax(tf.matmul(outputs[itr1], weights) + biases) )\n retSeq = tf.stack(ret,axis=1)\n return retSeq\n\n def setupLoss(self,learning_rate):\n temp = tf.nn.softmax_cross_entropy_with_logits(logits=self.outputs, labels=self.y, dim=2)\n cost = tf.reduce_mean(temp)\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n return optimizer, cost\n\n\nif __name__ == \"__main__\":\n learner = PTBLanguageModel(learning_rate=0.001)\n learner.trainRNN(500,10)\n learner.testRNN('mr.')\n\n","sub_path":"deeplearning/kaist_lecture/day_1_2/LanguageModel.py","file_name":"LanguageModel.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"356319168","text":"from Generic_BPMDevice import *\n#import sys, os\n#sys.path.insert(0, os.path.abspath('..'))\nfrom pkg_resources import require\nrequire(\"numpy\")\nimport numpy as np\nimport random\n\n\nclass SimulatedBPMDevice(Generic_BPMDevice):\n \"\"\"Simulated BPM device used for testing without the hardware. \n\n All of the abstract methods in the parent class must be overridden. This class has\n access to the RF device used in the testing so that it can read in the signals that\n are supposedly provided to it via it's RF inputs. \n\n Attributes:\n attenuation (float): Attenuation produced by the virtual splitter and cables\n RFSim (RF Simulator Obj) : Reference to an RF simulator \n GateSim (Gate Source Simulator Obj) : Reference to a gate source simulator\n \"\"\"\n\n def __init__(self, rf_sim, damage_level, gatesim=None, progatten=None, noise_mag=0.):\n \"\"\"Initializes the Libera BPM device object and assigns it an ID. \n \n Args:\n rf_sim (RFSignalGenerator Obj): The interface object that has access to an RF device \n this is needed in the simulator so it can access the input values that would \n normally come through the signals supplied to the devices inputs.\n gatesim (Gate_Source Object): The interface object that has access to a Gate Source\n device. This will typically be a simulated GateSource, this is an input to this \n class so it know what signals are being sent to it. \n progatten (Programmable attenuator Object): The interface object that has access to a programmable attenuator\n device. This will typically be a simulated GateSource, this is an input to this \n class so it know how much signals are being attenuated between the RF source and it. 
\n noise_mag (float): The magnitude of the white noise to be added to the input signals\n Returns: \n \n \"\"\"\n print(\"Simulated BPM device accessed on virtual channel\")\n self.attenuation = 12 # Typical attenuation when using a 4 way splitter and cables\n self.RFSim = rf_sim # Instance of the RF source used, allows the simulator to know what signals are output\n self.GateSim = gatesim # Instance of the Gate device, allows the simulator to know what signals are output\n self.ProgAtten = progatten # Instance of the Programmable attenuator, allows to know about changes to input levels.\n self.epics_id = 'SIMULATED_EPICS'\n self.mac_address = 'SIMULATED'\n self.switch_straight = 3 # The switch setting which corresponds to straight through.\n self.adc_n_bits = 12\n self.num_adcs = 4\n self.noise_mag = noise_mag\n self.damage_level = damage_level\n self.agc = 1\n self.attn = 0\n self.ft = 'Disabled'\n self.kx = 10\n self.ky = 10\n self.delta = 0\n self.attn_wfm = [1,1,1]\n self.switches = 'Auto'\n self.switch_val = 3\n self.dsc = 2\n\n def set_internal_state(self, state_dict):\n pass\n\n def get_attenuation(self):\n return 10\n\n def set_attenuation(self, atten):\n pass\n\n def get_x_position(self):\n \"\"\"Override method, gets the calculated X position of the beam.\n \n Args:\n \n Returns: \n float: X position in mm\n \"\"\"\n if self.ProgAtten is None:\n x_val = 0.0 # With an equal splitter there should be no X shift\n else:\n A_pwr, B_pwr, C_pwr, D_pwr = self.attenuate_inputs(self.RFSim.get_output_power()[0])\n total_power = A_pwr + B_pwr + C_pwr + D_pwr\n x_val = ((A_pwr + D_pwr) - (B_pwr + C_pwr)) / total_power\n return x_val * 10 + (random.random() - 0.5) * self.noise_mag # Scaling to mm and adding noise\n\n def get_y_position(self):\n \"\"\"Override method, gets the calculated X position of the beam.\n \n Args:\n \n Returns: \n float: Y position in mm\n \"\"\"\n if self.ProgAtten is None:\n y_val = 0.0 # With an equal splitter there should be no Y shift\n else:\n A_pwr, B_pwr, C_pwr, D_pwr = self.attenuate_inputs(self.RFSim.get_output_power()[0])\n total_power = A_pwr + B_pwr + C_pwr + D_pwr\n y_val = ((A_pwr + B_pwr) - (C_pwr + D_pwr)) / total_power\n return y_val * 10 + (random.random() - 0.5) * self.noise_mag # Scaling to mm and adding noise\n\n def get_x_sa_data(self, num_vals):\n \"\"\"Override method, gets the calculated X position SA data.\n\n Args:\n num_vals (int): The number of samples to capture\n Returns: \n timestamps (list): floats\n data (list): floats\n \"\"\"\n sa_x_data = []\n sa_x_times = []\n for m in range(num_vals):\n sa_x_data.append((random.random() - 0.5) * self.noise_mag)\n sa_x_times.append(m * 0.1)\n return sa_x_times, sa_x_data\n\n def get_y_sa_data(self, num_vals):\n \"\"\"Override method, gets the calculated Y position SA data.\n\n Args:\n num_vals (int): The number of samples to capture\n Returns: \n timestamps (list): floats\n data (list): floats\n \"\"\"\n sa_y_data = []\n sa_y_times = []\n for m in range(num_vals):\n sa_y_data.append((random.random() - 0.5) * self.noise_mag)\n sa_y_times.append(m * 0.1)\n return sa_y_times, sa_y_data\n\n def get_sa_data(self, num_vals):\n \"\"\"Override method, gets the calculated Y position SA data.\n\n Args:\n num_vals (int): The number of samples to capture\n Returns: \n timestamps (list): floats\n data (list): floats\n \"\"\"\n data = []\n times = []\n for m in range(num_vals):\n data.append((random.random() - 0.5) * self.noise_mag)\n times.append(m * 0.1)\n return times, data, data, data, data\n\n def 
get_tt_data(self):\n \"\"\"Override method, gets the ABCD TT data.\n\n Args:\n Returns: \n timestamps (list): floats\n data (list): floats\n \"\"\"\n tt_y_data = []\n tt_y_times = []\n for m in range(131072):\n tt_y_data.append((random.random() - 0.5) * self.noise_mag)\n tt_y_times.append(m * 936./500e6)\n return tt_y_times, tt_y_data\n\n def get_adc_data(self, adc_n_bits):\n \"\"\"Override method, gets the ABCD ADC data.\n\n Args:\n adc_n_bits (int): number of bit in the ADC\n Returns: \n timestamps (list): floats\n data (list): floats\n \"\"\"\n adc_max_counts = np.power(2, adc_n_bits)\n times = np.arange(0, 1024. / 117E6, 1./117e6)\n excitation_f = 500e3\n angles = np.mod(times * excitation_f, 1) * 2 * np.pi\n data = (np.sin(angles) + 1) * adc_max_counts / 2 # ADD power sensitivity to sin amplitude?\n return times, data, data, data, data\n\n def get_ft_data(self):\n \"\"\"Override method, gets the ABCD first turn data.\n\n Args:\n Returns: \n timestamps (list): floats\n data (list): floats\n \"\"\"\n adc_n_bits = 16\n adc_max_counts = np.power(2, adc_n_bits)\n times = np.arange(0, 250. / 30e6, 1./30e6)\n data = (np.sin(times) + 1) * adc_max_counts # ADD power sensitivity to sin amplitude?\n return times, data, data, data, data\n\n def get_beam_current(self):\n \"\"\"Override method, gets the beam current read by the BPMs. \n \n By measuring the output power from the RF device, the input power can be assumed, then an equation extracted\n from the Rigol 30303 and Libera BPM device can be used to give an estimate of the current. \n \n Args:\n \n Returns: \n float: Current in mA\n \"\"\"\n current = self.get_input_power() # Gets the current input power\n current = 1000 * (1.1193) ** current # Extracted equation from Rigol3030 vs Libera BPM measurements\n return current\n\n def get_input_power(self):\n \"\"\"Override method, gets the input power of the signals input to the device \n \n If a RF gate is setup then the RF power from the source will be reduced by the duty cycle.\n If a programmable attenuator is setup then the additional attenuation is added to the static 12dB loss.\n In the absence of a programmable attenuator, this function assumes that a standard 4 way splitter is used, \n that combined with the cable losses give an estimated loss of 12 dB. \n This is then taken off of the output power set by the RF device giving the result. 
\n \n Args:\n \n Returns: \n float: Input power in dBm\n \"\"\"\n\n power_total = self.RFSim.get_output_power()[0] # Gets the power output by the RF, total power into the system\n\n if self.GateSim is not None and self.GateSim.get_modulation_state() is not False:\n # gate source is present and enabled\n dutycycle = self.GateSim.get_pulse_dutycycle() # Get the current duty cycle\n log_cycle = 20 * np.log10(dutycycle) # Convert the duty cycle into dB\n # factor the duty cycle into the power read by the simulated BPM\n power_total = power_total - np.absolute(log_cycle)\n\n if self.ProgAtten is not None:\n A_pwr, B_pwr, C_pwr, D_pwr = self.attenuate_inputs(power_total)\n # Total power into the BPM after each signal is attenuated (dBm)\n power_total = 10 * np.log10(A_pwr + B_pwr + C_pwr + D_pwr)\n\n return power_total - self.attenuation\n\n def get_raw_bpm_buttons(self):\n \"\"\"Override method, gets the raw signal from each BPM.\n \n Args:\n \n Returns: \n int: Raw signal from BPM A\n int: Raw signal from BPM B\n int: Raw signal from BPM C\n int: Raw signal from BPM D\n \"\"\"\n ADC = 1000 * self.get_beam_current() # Gets a linear value for the BPM\n if self.ProgAtten is None:\n raw_A = raw_B = raw_C = raw_D = ADC\n else:\n A_pwr, B_pwr, C_pwr, D_pwr = self.attenuate_inputs(self.RFSim.get_output_power()[0])\n total_power = A_pwr + B_pwr + C_pwr + D_pwr\n raw_A = A_pwr / total_power * ADC\n raw_B = B_pwr / total_power * ADC\n raw_C = C_pwr / total_power * ADC\n raw_D = D_pwr / total_power * ADC\n\n return raw_A, raw_B, raw_C, raw_D\n\n def get_normalised_bpm_buttons(self):\n \"\"\"Override method, gets the normalised signal from each BPM.\n \n Args:\n \n Returns: \n float: Normalised signal from BPM A\n float: Normalised signal from BPM B\n float: Normalised signal from BPM C\n float: Normalised signal from BPM D\n \"\"\"\n if self.ProgAtten is None:\n norm_A = norm_B = norm_C = norm_D = 1\n else:\n A_pwr, B_pwr, C_pwr, D_pwr = self.attenuate_inputs(self.RFSim.get_output_power()[0])\n total_power = A_pwr + B_pwr + C_pwr + D_pwr\n # The *4 is to get back to normalised channels rather than normalise to the total power.\n norm_A = A_pwr / total_power * 4\n norm_B = B_pwr / total_power * 4\n norm_C = C_pwr / total_power * 4\n norm_D = D_pwr / total_power * 4\n return norm_A, norm_B, norm_C, norm_D # Assumes all BPM pickups are equal\n\n def get_device_id(self):\n \"\"\"Override method, gets the type of BPM device that the device is\n \n Args:\n \n Returns: \n str: Device type \n \"\"\"\n return \"Simulated BPM Device\"\n\n def get_adc_sum(self):\n \"\"\"Override method, sum of the raw signals\n\n Returns the sum signal, comprised of the sum of all four raw ADC channels. \n\n Args:\n Returns: \n int: Total ADC counts\n \"\"\"\n a, b, c, d = self.get_raw_bpm_buttons()\n ADC_sum = a + b + c + d # Sums the BPM values used in the simulator\n return ADC_sum\n\n def get_input_tolerance(self):\n \"\"\"Override method, gets the maximum input power the device can take\n\n The devices will break if the input power is too high, as such, each device has their\n own tolerances, this function will return this tolerance. It should be used to ensure \n that the power put into the device is not too high to break the device. 
\n\n Args:\n\n Returns: \n float: max input power in dBm\n \"\"\"\n return -40 # Max tolerance of the simulated device, as low as the most susceptible real device\n\n def attenuate_inputs(self, power_total):\n A_atten, B_atten, C_atten, D_atten = self.ProgAtten.get_global_attenuation()\n\n # The power delivered into each BPM input after passing through the attenuator.\n # Assuming no losses through cables etc...\n # converted into mW\n A_pwr = 10 ** ((power_total - 6 - A_atten) / 10)\n B_pwr = 10 ** ((power_total - 6 - B_atten) / 10)\n C_pwr = 10 ** ((power_total - 6 - C_atten) / 10)\n D_pwr = 10 ** ((power_total - 6 - D_atten) / 10)\n return A_pwr, B_pwr, C_pwr, D_pwr\n\n def get_performance_spec(self):\n \"\"\"Override method, gets the factory performance specifications.\n \n In order to determine pass/fail criteria, one needs to have something to compare to. \n This function returns the factory specification data ready for comparison.\n \n The following results are present: All results are in um.\n Noise measurements:\n 'noise_10kHz' (fs=10kHz, BW=2kHz, DSC=on, AGC=off)\n 'noise_1MHz' (fs=1MHz(TBT), BW=0.3*fs, DSC=off, AGC=off)\n Beam power dependence: Input is power at the Libera input\n 'Beam_power_dependence_X' (fs=10kHz, DSC=on, AGC=off)\n 'Beam_power_dependence_Y' (fs=10kHz, DSC=on, AGC=off)\n 'Beam_power_dependence_deviation_within_range_X' (fs=10kHz, DSC=on, AGC=off)\n 'Beam_power_dependence_deviation_within_range_Y' (fs=10kHz, DSC=on, AGC=off)\n Fill pattern dependence: (Constant input power of -10dBm at libera input)\n 'Fill_pattern_dependence_X' (T=1/fs, fs=10kHz, DSC=on, AGC=off)\n 'Fill_pattern_dependence_Y' (T=1/fs, fs=10kHz, DSC=on, AGC=off)\n \n Args:\n Returns: \n dict: a set of vectors containing comparison data\n \"\"\"\n specs = {}\n specs['noise_10kHz'] = ([0, -24, -24, -32, -32, -40, -40, -44, -44, -50, -50, -56, -56, -62, -62, -68, -68, -74, -74, -80, -80],\n [0.2, 0.2, 0.3, 0.3, 0.5, 0.5, 1, 1, 2, 2, 4, 4, 5, 5, 10, 10, 20, 20, 50, 50, 100])\n specs['noise_1MHz'] = ([0, -32, -32, -36, -36, -40, -40, -44, -44, -50, -50, -56, -56, -62, -62, -68, -68, -74, -74, -80, -80],\n [3, 3, 5, 5, 6, 6, 8, 8, 15, 15, 30, 30, 50, 50, 150, 150, 300, 300, 600, 600, 1500])\n specs['Beam_power_dependence_X'] = ([0, -2, -2, -56, -56, -68, -68, -74, -74, -80, -80],\n [0, 0, 1, 1, 2, 2, 10, 10, 20, 20, 50])\n specs['Beam_power_dependence_Y'] = ([0, -2, -2, -56, -56, -68, -68, -74, -74, -80, -80],\n [0, 0, 1, 1, 2, 2, 10, 10, 20, 20, 50])\n specs['Beam_power_dependence_deviation_within_range_X'] = \\\n ([[0, -8], [-8, -20], [-20, -32], [-32, -40], [-40, -56], [-56, -68], [-68, -70]],\n [1, 1, 1, 1, 1, 5, 50])\n specs['Beam_power_dependence_deviation_within_range_Y'] = \\\n ([[0, -8], [-8, -20], [-20, -32], [-32, -40], [-40, -56], [-56, -68], [-68, -70]],\n [1, 1, 1, 1, 1, 5, 50])\n\n specs['Fill_pattern_dependence_X'] = ([20, 100], 1)\n specs['Fill_pattern_dependence_Y'] = ([20, 100], 1)\n\n return specs\n","sub_path":"BPMDevice/Simulated_BPMDevice.py","file_name":"Simulated_BPMDevice.py","file_ext":"py","file_size_in_byte":16145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"308505615","text":"import matplotlib\r\nmatplotlib.use('Agg')\r\nimport numpy as np\r\nimport pandas as pd\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.metrics import silhouette_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom 
sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\nwinedata = pd.read_csv('wine.csv',index_col = 0,header = 0, encoding = 'utf-8')\r\n\r\n# Extract year from title column\r\nwinedata['year'] = winedata['title'].str.findall('(\\d{4})').str[-1]\r\n\r\n# Drop related columns\r\nwinedata_clear = winedata.drop(['province', 'region_1', 'region_2', 'taster_name', 'taster_twitter_handle'], axis = 1)\r\n\r\n# Drop the row if its price is a missing value; For categorical, replace missing value to string \"others\"\r\nwinedata_clear.dropna(subset = ['price', 'year'], inplace = True)\r\nwinedata_clear.fillna('Others',inplace=True)\r\n\r\nwinedata_clear['year'] = winedata_clear['year'].astype('int64')\r\nwinedata_clear = winedata_clear.loc[(winedata_clear['year'] >= 1950) & (winedata_clear['year'] <= 2019)]\r\nwinedata_clear.reset_index(inplace = True, drop = True)\r\n\r\ndef no_number(tokens):\r\n r = re.sub('(\\d)+', 'NUM', tokens.lower())\r\n return r\r\n \r\n# Use tf-idf method to tokenize the column \"description\"\r\n\r\ncommon_stopwords = [\"a\", \"about\", \"above\", \"after\", \"again\", \"against\", \"all\", \"am\", \"an\", \"and\", \"any\", \"are\", \"aren't\",\r\n \"as\", \"at\", \"be\", \"because\", \"been\", \"before\", \"being\", \"below\", \"between\", \"both\", 'but', 'by', \"can't\",\r\n \"cannot\", \"could\", \"couldn't\", \"did\", \"didn't\", \"do\", \"does\", \"doesn't\", \"doing\", \"don't\", \"down\", \"during\",\r\n \"each\", \"few\", \"for\", \"from\", \"further\", \"had\", \"hadn't\", \"has\", \"hasn't\", \"have\", \"haven't\", \"having\", \"he\",\r\n \"he'd\", \"he'll\", \"he's\", \"her\", \"here\", \"here's\", \"hers\", \"herself\", \"him\", \"himself\", \"his\", \"how\", \"how's\",\r\n \"i\", \"i'd\", \"i'll\", \"i'm\", \"i've\", \"if\", \"in\", \"into\", \"is\", \"isn't\", \"it\", \"it's\", \"its\", \"itself\", \"let's\",\r\n \"me\", \"more\", \"most\", \"mustn't\", \"my\", \"myself\", \"no\", \"nor\", \"not\", \"of\", \"off\", \"on\", \"once\", \"only\", \"or\",\r\n \"other\", \"ought\", \"our\", \"ours\", \"ourselves\", \"out\", \"over\", \"own\", \"same\", \"shan't\", \"she\", \"she'd\",\r\n \"she'll\", \"she's\", \"should\", \"shouldn't\", \"so\", \"some\", \"such\", \"than\", \"that\", \"that's\", \"the\", \"their\",\r\n \"theirs\", \"them\", \"themselves\", \"then\", \"there\", \"there's\", \"these\", \"they\", \"they'd\", \"they'll\", \"they're\",\r\n \"they've\", \"this\", \"those\", \"through\", \"to\", \"too\", \"under\", \"until\", \"up\", \"very\", \"was\", \"wasn't\", \"we\",\r\n \"we'd\", \"we'll\", \"we're\", \"we've\", \"were\", \"weren't\", \"what\", \"what's\", \"when\", \"when's\", \"where\", \"where's\",\r\n \"which\", \"while\", \"who\", \"who's\", \"whom\", \"why\", \"why's\", \"with\", \"won't\", \"would\", \"wouldn't\", \"you\",\r\n \"you'd\", \"you'll\", \"you're\", \"you've\", \"your\", \"yours\", \"yourself\", \"yourselves\", 'aren', 'can', 'couldn',\r\n \"didn\", \"doesn\", \"don\", \"hadn\", \"hasn\", \"haven\", \"isn\", \"let\", \"ll\", \"mustn\", \"re\", \"shan\", \"shouldn\", \"ve\", \r\n \"wasn\", \"weren\", \"won\", \"wouldn\"]\r\n\r\ncustomized_stopwords = [\"NUM\", \"wine\", \"flavors\", \"aroma\", \"aromas\", \"palate\", \"finish\", \"drink\", \"notes\", \"nose\", \"now\", \r\n \"offers\", \"well\", \"fruits\", \"texture\", \"shows\", \"like\",\"years\", \"character\", \"made\", \"just\", \"mouth\",\r\n \"vineyard\", \"also\", \"bit\", \"note\", 
\"hint\", \"one\", \"give\", \"will\", \"flavor\", \"alongside\", \"along\",\r\n \"ready\", \"yet\", \"mouthfeel\"]\r\nstopwords = common_stopwords + customized_stopwords\r\n\r\ntfidf_vectorizer = TfidfVectorizer(stop_words = stopwords, preprocessor=no_number)\r\ntext=tfidf_vectorizer.fit_transform(winedata_clear['description'])\r\n\r\nidf_rank_index = tfidf_vectorizer.idf_.argsort()[0 : 1000]\r\nidf_rank_word = []\r\nfor i in idf_rank_index:\r\n for word, index in tfidf_vectorizer.vocabulary_.items():\r\n if i == index:\r\n idf_rank_word.append(word)\r\n \r\n#print(idf_rank_word)\r\n\r\nnew_text = pd.DataFrame(text[:, idf_rank_index].todense(), columns = idf_rank_word)\r\n\r\n# Fit a Kmeans clustering model with different clusters\r\n\r\nclusters_text = list(range(2, 21))\r\nsilhouette = []\r\n\r\nfor cluster in clusters_text:\r\n kmeans_text = KMeans(n_clusters = cluster, max_iter = 1000)\r\n kmeans_text.fit(new_text)\r\n score = silhouette_score(new_text, kmeans_text.labels_,metric = 'cosine')\r\n silhouette.append(round(score,5))\r\n \r\n# Compute Silouette score to find the best cluster\r\n\r\nfig = plt.figure()\r\nplt.bar(clusters_text, silhouette)\r\nplt.plot(clusters_text, silhouette)\r\nplt.xlabel('Clusters')\r\nplt.ylabel('Silhouette Score')\r\nplt.title('Wine Text Data Silhouette Score Versus Clusters')\r\nfig.savefig('Silouette_tfidf.png')\r\n\r\nkmeans_5_text = KMeans(n_clusters = 5, max_iter = 1000)\r\nkmeans_5_text.fit(new_text)\r\n\r\nlabel = pd.Series(kmeans_5_text.labels_, name = 'cluster_label')\r\nlabel_text = new_text.join(label)\r\n\r\ndef top_ten(lda_list):\r\n \r\n top_ten = []\r\n top_index = np.argsort(-lda_list)[:10]\r\n for i in top_index:\r\n top_ten.append(new_text.columns[i])\r\n return top_ten\r\n \r\n# Build logistic regression model for different clusters\r\n\r\ndef logreg_top_ten(int):\r\n label_text['convert_label'] = np.where(label_text['cluster_label'] == int, 1, 0) # Convert the label to 1 and other label to 0\r\n X_train,X_test,Y_train,Y_test=train_test_split(label_text.drop(['cluster_label', 'convert_label'], axis = 1),\r\n label_text['convert_label'],test_size=0.25)\r\n \r\n max_score = 0\r\n max_c = 0\r\n for c in [0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1]:\r\n log_reg = LogisticRegression(C = c, solver = 'lbfgs')\r\n log_reg.fit(X_train, Y_train)\r\n log_reg_score = log_reg.score(X_test, Y_test)\r\n \r\n if log_reg_score > max_score:\r\n max_score = log_reg_score\r\n max_c = c\r\n \r\n log_reg_max = LogisticRegression(C = max_c, solver = 'lbfgs')\r\n log_reg_max.fit(X_train, Y_train)\r\n top_word = top_ten(log_reg_max.coef_.reshape(1000,))\r\n \r\n return top_word\r\n\r\nfor i in range(5):\r\n print(str(i)+':')\r\n print(logreg_top_ten(i))\r\n\r\n\r\nresult = winedata_clear[['title', 'price', 'points']].join(label)\r\nresult.to_csv('tfidf_five_cluster.csv')\r\n\r\nlabel_text = new_text.join(label)\r\n\r\ndef kmeans_top_bottom_ten(cluster_label):\r\n text_df = label_text[label_text['cluster_label'] == cluster_label]\r\n label_df = kmeans_5_text.cluster_centers_[cluster_label].reshape(1,-1)\r\n text_df['cosine_similarity'] = cosine_similarity(text_df.drop('cluster_label', axis = 1), label_df)\r\n \r\n top_ten_percent = text_df.nlargest(int(text_df.shape[0] * 0.1), 'cosine_similarity')\r\n bottom_ten_percent = text_df.nsmallest(int(text_df.shape[0] * 0.1), 'cosine_similarity')\r\n \r\n top_ten_final = winedata_clear[['title', 'description', 'price', 'points']].join(\r\n top_ten_percent[['cluster_label', 'cosine_similarity']], how = 'right')\r\n\r\n 
bottom_ten_final = winedata_clear[['title', 'description', 'price', 'points']].join(\r\n bottom_ten_percent[['cluster_label', 'cosine_similarity']], how = 'right')\r\n \r\n return top_ten_final, bottom_ten_final\r\n\r\nfor i in range(5):\r\n top_center, bottom_center = kmeans_top_bottom_ten(i)\r\n top_center.to_csv('tfidf_top_center_{}.csv'.format(i))\r\n bottom_center.to_csv('tfidf_bottom_center_{}.csv'.format(i))\r\n \r\n top_rating =top_center.nlargest(20, 'points')\r\n top_rating.to_csv('tfidf_top_rating_{}.csv'.format(i))\r\n \r\n bins = [0, 10, 20, 40, np.inf]\r\n labels = ['0-10', '10-20', '20-40', 'above 40']\r\n top_rating['price_bin'] = pd.cut(top_rating['price'], bins=bins, labels = labels)\r\n price_bin = top_rating.sort_values('price')\r\n price_bin.to_csv('tfidf_price_bin_{}.csv'.format(i))\r\n","sub_path":"Five-Cluster/Python Scripts/Wine_Review_TFIDF.py","file_name":"Wine_Review_TFIDF.py","file_ext":"py","file_size_in_byte":8135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"570060709","text":"#!/usr/bin/env python\n# -*- coding: latin-1 -*-\nimport sys\nimport os\nimport base64\nfrom PySide import QtCore\nfrom pprint import pprint\n\nQtCore.QSettings.setPath(QtCore.QSettings.IniFormat,\n QtCore.QSettings.UserScope,\n os.path.expanduser('~'))\n\n\nhome_path =os.path.expanduser('~')\nsettings_path = os.path.join(home_path, '.g5plugins')\nsettings_file = os.path.join(settings_path, 'g5plugins.ini')\n\n\nsettings = QtCore.QSettings(settings_file,\n QtCore.QSettings.IniFormat)\n\nsettings.setIniCodec(QtCore.QTextCodec.codecForName('ISO 8859-1'))\n\n\ndef get_templates_paths():\n import g5plugins.templates\n paths = []\n paths.append(os.path.join(settings_path, 'templates'))\n paths.append(g5plugins.templates.__path__[0])\n return paths\n \n\ndef get_reports_path():\n return os.path.join(settings_path, 'reports')\n\n\n\ndef save_settings(settings_dict, sync = False):\n \n for item in settings_dict:\n if isinstance(settings_dict[item], dict):\n settings.beginGroup(item) \n save_settings(settings_dict[item])\n settings.endGroup()\n else:\n settings.setValue(item, settings_dict[item])\n if sync:\n settings.sync()\n \n \n\n\ndef load_settings():\n settings_dict = {}\n settings_keys = settings.allKeys()\n for key in settings_keys:\n tags = key.split('/')\n aux = settings_dict\n for i in range(0, len(tags)):\n tag = str(tags[i])\n if i == len(tags)-1:\n aux[tag] = settings.value(key)\n else: \n if tag not in aux:\n aux[tag] = {} \n aux = aux[tag] \n\n return settings_dict\n\n\nif __name__ == '__main__':\n from pprint import pprint\n s = load_settings()\n\n s['test']={'a':{'b': 'c'}}\n save_settings(s, True)\n pprint(s)\n\n\n#print load_settings()\n# aux = {}\n# aux = aux[tag]\n# aux = settings_keys[key]\n# \n# settings_dict\n\n\n\n#def load_settings(tag):\n# values = {}\n# keys = settings.allKeys()\n# for key in keys:\n# tags = key.split('/')\n# if tags[0] == tag:\n# values[str(tags[1])] = settings.value(key)\n# \n# return values \n\n \n \n\n\n#t= 'test'\n#v = {'size': QtCore.QSize(32, 96), 'width': QtCore.QTime()}\n#store_settings(t, v)\n#pprint(settings_keys_to_dict())\n#ALTCHARS = 'YmMpx5VNUrYMwJXA'\n#\n#\n##QtCore.QSettings.setDefaultFormat(QtCore.QSettings.IniFormat)\n##settings = QtCore.QSettings('XcribanoSoftware', 'g5plugins')\n##settings = QtCore.QSettings(QtCore.QSettings.IniFormat,\n## QtCore.QSettings.UserScope,\n## QtCore.QString('.g5plugins'),\n## QtCore.QString('g5plugins'))\n#\n#print 
base64.b64encode('masterkey', ALTCHARS)\n#settings.setValue('version', '1.0')\n#settings.beginGroup('connection')\n#settings.setValue('username', 'SYSDBA')\n#settings.setValue('encrypt_password', str(base64.b64encode('masterkey', ALTCHARS)))\n#settings.setValue('host', '192.168.1.5')\n#settings.setValue('port', '3050')\n#settings.setValue('database', 'c:\\Gestion5SQL\\Datos\\REP_FERMIN_ESCRIBANO.fdb')\n#settings.endGroup()\n\n\n\n","sub_path":"g5plugins/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"539665352","text":"import unittest\nimport numpy as np\nfrom classes.Predictand import Predictand\nfrom classes.Config import Config\nfrom classes.PredictandToyModel import PredictandToyModel\nfrom classes.Precursors import Precursors\nfrom classes.MixtureModel import MixtureGaussianModel\nfrom classes.ForecastNN import ForecastNN\nfrom scipy import stats\n# 1.) create object of composite and cluster\n# 2.) create forecast_nn method with artificial data\n# 3.)\n\n\nclass TestForecastNN(unittest.TestCase):\n \"\"\" Create test class for Forcast\"\"\"\n def setUp(self):\n \"\"\"initialize class cluster and composites\"\"\"\n # cluster\n cl_inifile = \"/home/sonja/Documents/Clustering-Forecast/ini/clusters_America_prec_t_test.ini\"\n cl_output_path = \"/home/sonja/Documents/Clustering-Forecast/tests/\"\n cl_output_label = \"TEST\"\n cl_config = Config(\"Test.log\")\n self.predictand = Predictand(cl_inifile, cl_output_path, cl_output_label, cl_config.config_dict)\n # composite\n co_inifile = \"/home/sonja/Documents/Clustering-Forecast/ini/composites_America_PSL.ini\"\n co_output_path = \"/home/sonja/Documents/Clustering-Forecast/tests/\"\n co_output_label = \"TEST\"\n co_config = Config(\"Test.log\")\n self.precursors = Precursors(co_inifile, co_output_path, co_output_label, co_config.config_dict)\n\n # set cluster method parameters\n self.method_name = \"ward\"\n self.k = 2\n self.predictand_var = \"prec_t\"\n # initialize Forecast class\n self.forecast_nn = ForecastNN(cl_inifile, cl_config.config_dict, self.k, self.method_name)\n\n self.initialize_data()\n\n def initialize_data(self):\n \"\"\" initialize toy data to test algorithm\"\"\"\n # create data for the two different composites\n # first two are snow data and second two data points are ice data\n self.gaussian_distributions = [\n {\"mean\": [-1, 1, 1, -1],\n \"sigma\": [[0.00001, 0., 0., 0.], [0., 0.00001, 0., 0.], [0., 0., 0.00001, 0.], [0., 0., 0., 0.00001]]},\n {\"mean\": [-1, 0, 1, 1],\n \"sigma\": [[0.00001, 0., 0., 0.], [0., 0.00001, 0., 0.], [0., 0., 0.00001, 0.], [0., 0., 0., 0.00001]]}, ]\n\n # create time series\n self.t_end = 5000\n self.time_series = range(self.t_end)\n\n # create instance to get samples for sic and sce\n precursors = MixtureGaussianModel(self.gaussian_distributions)\n # get samples\n self.X = (precursors.rvs(self.t_end))\n\n # array which lead with composites to clusters pf PRCP\n self.array = np.array([[1, 2, 1, 1], [-0.5, 0, -0.5, 1.], [-1, 0, -1, -1]], np.float)\n self.prcp_clusters = [{\"cluster\": [1, -1, 1]}, {\"cluster\": [1, 1, -1]}]\n self.prcp = PredictandToyModel(self.prcp_clusters, self.array)\n self.y = self.prcp.get_data_from_precursors(self.X)\n\n # set data to predictand input arrays\n self.predictand.dict_standardized_pred_1D[self.predictand.var] = self.y\n self.predictand.dict_pred_1D[self.predictand.var] = self.y\n\n # set data to precursors input 
data\n self.precursors.dict_precursors[\"snow\"] = self.X[:, :2]\n self.precursors.dict_standardized_precursors[\"snow\"] = self.X[:, :2]\n self.precursors.dict_prec_1D[\"snow\"] = self.X[:, :2]\n self.precursors.dict_precursors[\"ice\"] = self.X[:, 2:]\n self.precursors.dict_standardized_precursors[\"ice\"] = self.X[:, 2:]\n self.precursors.dict_prec_1D[\"ice\"] = self.X[:, 2:]\n\n self.precursors.dict_standardized_precursors.pop(\"PSL\")\n self.precursors.dict_prec_1D.pop(\"PSL\")\n self.precursors.dict_precursors.pop(\"PSL\")\n # Create train and test dataset with an 66:33 split\n self.y_train, self.X_train, self.y_test, self.X_test = self.train_test_split_pred(self.predictand,\n self.precursors,\n test_size=0.66,\n random_state=2019)\n\n @staticmethod\n def train_test_split_pred(predictand, precursors, test_size=0.66, random_state=2019):\n np.random.seed(random_state)\n len_predicts = len(predictand.dict_pred_1D[predictand.var])\n len_test_data = int(len_predicts * test_size)\n selected_time_steps = np.random.choice(len_predicts, len_test_data, replace=False)\n y_train = {}\n # noinspection PyPep8Naming\n X_train = {}\n y_test = {}\n # noinspection PyPep8Naming\n X_test = {}\n\n for i in range(len_predicts):\n if i in selected_time_steps:\n y_train.setdefault(predictand.var, []).append(predictand.dict_pred_1D[predictand.var][i])\n for prec in precursors.dict_precursors.keys():\n X_train.setdefault(prec, []).append(precursors.dict_prec_1D[prec][i])\n else:\n y_test.setdefault(predictand.var, []).append(predictand.dict_pred_1D[predictand.var][i])\n for prec in precursors.dict_precursors.keys():\n X_test.setdefault(prec, []).append(precursors.dict_prec_1D[prec][i])\n return y_train, X_train, y_test, X_test\n\n def calculate_clusters_and_composites(self):\n # Calculate clusters of precursors for var, by removing one year\n self.calculate_clusters_from_test_data(self.y_train, self.method_name, self.k)\n\n # Calculate composites\n self.precursors.get_composites_data_1d_train_test(self.X_train, self.predictand.f, self.k, self.method_name,\n self.predictand_var)\n\n def calculate_forecast(self):\n \"\"\"calculate forecast_nn using toy model data\"\"\"\n self.calculate_clusters_and_composites()\n self.forecast_nn.list_precursors_all = [\"snow\", \"ice\"]\n self.forecast_nn.list_precursors_combinations = [[\"snow\"], [\"ice\"], [\"snow\", \"ice\"]]\n\n # for this test purpose we take both precursors\n #train model using training data\n self.forecast_nn.train_nn(self.forecast_nn.list_precursors_all, self.predictand.clusters, self.precursors.dict_composites, self.X_train,\n self.y_train[self.predictand_var])\n\n self.forecast_data = np.zeros((len(self.y_test[self.predictand.var]), self.predictand.dict_pred_1D[\n f\"{self.predictand.var}\"].shape[1]))\n # Calculate forecast_nn for all years\n self.pattern_corr_values = []\n # Prediction\n for year in range(len(self.y_test[self.predictand.var])): # len(y_test[predictand.var])):\n forecast_temp = self.forecast_nn.prediction_nn(self.forecast_nn.list_precursors_all,\n self.predictand.clusters, self.precursors.dict_composites,\n self.X_test, year)\n # Assign forecast_nn data to array\n self.forecast_data[year] = forecast_temp\n\n # Calculate pattern correlation\n self.pattern_corr_values.append(round(stats.pearsonr(self.forecast_data[year],\n self.y_test[self.predictand.var][year])[0]))\n\n # Round data for correlation analysis\n for j in range(len(self.y_test[self.predictand.var])):\n for i in range(len(self.y_test[self.predictand.var][j])):\n 
self.y_test[self.predictand.var][j][i] = round(self.y_test[self.predictand.var][j][i])\n self.forecast_data[j][i] = round(self.forecast_data[j][i])\n\n def calculate_clusters_from_test_data(self, train_data: dict, method_name: str, k: int):\n \"\"\"\n calculate clusters for predictand variable\n :param train_data: cluster data which should be used to calculate clusters\n :param method_name: name of the method used for clustering\n :param k: number of clusters\n \"\"\"\n print('Calculate clusters')\n self.predictand.dict_standardized_pred_1D = train_data\n self.predictand._set_method_name(method_name)\n self.predictand._set_k(k)\n self.predictand._set_linkage()\n self.predictand._set_f()\n self.predictand._cluster_frequency()\n self.predictand._set_clusters_1d()\n\n\nclass TestInit(TestForecastNN):\n def test_initial_precursors_predictand_names(self):\n # First calculate clusters and composites and then check keys\n self.calculate_clusters_and_composites()\n # test whether names are correctly passed\n self.assertEqual(list(self.precursors.dict_composites.keys()), [\"snow\", \"ice\"])\n self.assertEqual(list(self.predictand.dict_standardized_pred_1D.keys())[0], \"prec_t\")\n\n def test_calculate_forecast_results(self):\n \"\"\" test whether toy model results lead to 100% forecast_nn\"\"\"\n\n self.calculate_forecast()\n self.time_correlation, self.significance = \\\n self.forecast_nn.calculate_time_correlation(np.array(self.y_test[self.predictand.var], dtype=int),\n np.array(self.forecast_data, dtype=int), 1, True)\n self.expected_time_correlation = [1, 1, 1]\n self.expected_significance = [0, 0, 0]\n # noinspection PyTypeChecker\n self.assertListEqual(np.around(self.time_correlation, decimals=0).astype('int').tolist(),\n self.expected_time_correlation)\n # noinspection PyTypeChecker\n self.assertListEqual(np.around(self.significance, decimals=0).astype('int').tolist(),\n self.expected_significance)\n # forecast_nn of pattern correlation should be for each forecast_nn 1\n # hence sum of all forecasts should be identical to the length of the forecasts\n self.assertEqual(sum(self.pattern_corr_values), len(self.y_test[self.predictand.var]))\n\n\nif __name__=='__main__':\n unittest.main()","sub_path":"tests/test_forecast_nn.py","file_name":"test_forecast_nn.py","file_ext":"py","file_size_in_byte":10078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331512952","text":"from django.urls import path\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom .views import GithubRegistration, CreateProfile, Logout, ManualUserRegistration, Login\n\nurlpatterns = [\n path('signup/', ManualUserRegistration.as_view()),\n path('signin/', Login.as_view()),\n path('signout/', Logout.as_view()),\n path('signin/github/callback', GithubRegistration.as_view()),\n path('profile/create', CreateProfile.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)","sub_path":"mhack/login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"482197094","text":"import abb1\r\nimport numpy as np\r\nimport serial\r\nimport socket\r\nfrom threading import Thread\r\nimport time\r\nimport struct\r\n\r\n#Camera data Grabber Init\r\nhost = socket.gethostname()\r\nport = 5000 # initiate port no above 1024\r\nserver_socket = socket.socket() # get instance\r\n\r\n#Controller Init\r\nTargetsList=np.zeros((2000, 
4))\r\nID=0\r\nTarget=False\r\nTclock=0\r\n#Conveyor belt encoder Init\r\nencoder=0 ; MMperCount=0.741806782 ; ConvSpeed=0 ; encoder1=0 ; t1=0; SpeedTStamp=0; SpeedFrequency=2 #Hz ConvSpeed Calc freq\r\n\r\nSorted=0 ; Missed=0 \r\nPetBin=[-320,566]\r\nAluBin=[-600,76]\r\nCartonBin=[430,-458]\r\nBin=[0,0]\r\n#Robot Initialisation\r\nCartesianSpeed=5000 #mm/s\r\n\r\nR = abb1.Robot(ip='192.168.125.1')\r\nR.set_speed([CartesianSpeed,50,50,50]) \r\nR.set_zone('z200') #set zone\r\nR.set_cartesian([[0,0,-1200], [0,0,1,0]]) # set to zero\r\n\r\nsin=0.4962; cos=-0.8681\r\nPanic=False\r\nVacuum=False\r\nPlaced=False\r\n\r\ndef thread1(threadname):\r\n    global TargetsList,Tclock, Target, Bin, PrevBin, encoder,t1,ConvSpeed,encoderTStamp,SpeedTStamp, Panic,Vacuum,Sorted,Missed # Optional if you treat a as read-only\r\n    while (1):\r\n        Target=False\r\n        #print(Tclock)\r\n        print(encoder)\r\n        print(ID)\r\n        print('ConvSpeed',ConvSpeed,'mm/s')\r\n        #print('Vacuum',Vacuum) \r\n        #print('Panic',Panic)\r\n        #print('Sorted items',Sorted)\r\n        #print('Missed items',Missed)\r\n        for i in range(0, ID):\r\n            #print(encoder,TargetsList[i,1],encoder-TargetsList[i,1])\r\n            \r\n            \r\n            if encoder-TargetsList[i,1]>-400 and encoder-TargetsList[i,1]<400 and TargetsList[i,3]==0: \r\n\r\n                \r\n                TargetData=[TargetsList[i,0],(encoder-TargetsList[i,1]),TargetsList[i,2],TargetsList[i,1]]\r\n                print(TargetData)\r\n                \r\n                \r\n                print('robot grab it---------------------------------------------------------------------------------------------------------------------')\r\n                #Bin,Placed=Pick_n_Place(TargetData,ConvSpeed,Bin)\r\n                Bin,Placed=Pick_n_Toss(TargetData,ConvSpeed,Bin)\r\n                if Placed==True :\r\n                    Sorted=Sorted+1\r\n                    TargetsList[i,3]=1 # Target Selected\r\n                else:\r\n                    Missed=Missed+1\r\n                    TargetsList[i,3]=1\r\n                break\r\n#            if Target==True:\r\n#                print('robot grab it---------------------------------------------------------------------------------------------------------------------')\r\n#                print('robot grab it---------------------------------------------------------------------------------------------------------------------')\r\n#\r\n#                time.sleep(3)\r\n#                Sorted=Sorted+1\r\n#                #Bin,Sorted,Missed=Pick_n_Place(TargetData,ConvSpeed,Bin,Sorted,Missed) \r\n#            \r\n#            \r\n    \r\ndef thread2(threadname):\r\n    global ID, TargetsList,t0,encoder,Tclock,ConvSpeed\r\n    ID=0\r\n    server_socket.bind((host, port)) # bind host address and port together\r\n    # configure how many client the server can listen simultaneously\r\n    server_socket.listen(2)\r\n    conn, address = server_socket.accept() # accept new connection\r\n    print(\"Connection from: \" + str(address))\r\n    t0=time.clock()\r\n    unpacker= struct.Struct('I f f f')\r\n    delay=0\r\n    while True: \r\n        data = conn.recv(unpacker.size) #take data\r\n        unpacked_data = unpacker.unpack(data) #unpack\r\n        if not data:\r\n            break\r\n        Class,X,Y,delay = unpacked_data \r\n        \r\n        #Vacuum,encoder,encoderTStamp=readArduino()\r\n        TargetsList[ID,:]=[X,(float(encoder)-Y)-(delay*ConvSpeed),Class,False] #Save data at TargetsList for robot controller\r\n        print('New Target',TargetsList[ID,:])\r\n        Tclock=time.clock()\r\n        ID=ID+1\r\n    conn.close() # close the connection\r\n\r\ndef thread3(threadname):\r\n    global encoder,ConvSpeed,Vacuum,Panic\r\n    s=serial.Serial('COM5',9600)\r\n    encoderTStamp=time.clock()\r\n    SpeedFrequency=10\r\n    SpeedTStamp=time.clock()\r\n    encoderP=0\r\n    a=1/6\r\n    while(1):\r\n        myData= 
s.readline()\r\n        encoder=int(myData.decode('utf-8', errors='ignore'))*0.8167641 #0.78113 #0.741806782\r\n        encoderTStamp=time.clock()\r\n        if ((float(encoderTStamp)-float(SpeedTStamp))>1/SpeedFrequency):\r\n            ConvSpeedRaw=(encoder-encoderP)/(encoderTStamp-SpeedTStamp)\r\n            ConvSpeed=(1-a)*ConvSpeed+a*ConvSpeedRaw # 1st-order low-pass filter\r\n            encoderP=encoder\r\n            SpeedTStamp=encoderTStamp\r\n            if ConvSpeed<100: Panic=True; print('--------------Conveyor is Stopped-----------------')\r\n            else: Panic=False\r\n        \r\n        \r\n\r\ndef Pick_n_Place(TargetData,ConvSpeed,Bin):\r\n    global Vacuum,encoder,Panic\r\n    \r\n    x=TargetData[0]\r\n    y=TargetData[1]\r\n    a = np.array((Bin[0] ,Bin[1]))\r\n    b = np.array((x,y))\r\n    dist=np.linalg.norm(a-b)\r\n    #offset=0.02*(dist-300)\r\n    #PickingTime=0.2+dist/CartesianSpeed\r\n    xR=(-0.8681)*x-0.4962*y\r\n    yR=0.4962*x-0.8681*y\r\n    Placed=False\r\n    if abs(xR)<600 and abs(yR)<600: \r\n        if TargetData[2]==0:\r\n            zoff=-1332 + 10\r\n            Bin=AluBin\r\n        elif TargetData[2]==1:\r\n            zoff=-1376 + 10\r\n            Bin=CartonBin\r\n        elif TargetData[2]==2:\r\n            zoff=-1380 + 10\r\n            Bin=PetBin\r\n        y=((int(encoder)-TargetData[3]))+(0.2+0.1)*ConvSpeed+90\r\n        xR=(-0.8681)*x-0.4962*y\r\n        yR=0.4962*x-0.8681*y\r\n        \r\n        R.set_cartesianBLUE([[xR,yR,zoff], [0.2,0.1,ConvSpeed,0.3]])\r\n        time.sleep(0.2)\r\n        \r\n        \r\n        if Vacuum==True :\r\n            R.set_cartesianTime([[Bin[0],Bin[1],-1250], [0,0,1,0.2]])\r\n            \r\n            print('Target Sorted',TargetData)\r\n            Placed=True\r\n        else:\r\n            R.set_cartesianTime([[Bin[0],Bin[1],-1250], [0,0,1,0.2]])\r\n            print('Target missed',TargetData)\r\n            Bin=[xR,yR]\r\n            Placed=False\r\n    return Bin,Placed\r\n\r\ndef Pick_n_Toss(TargetData,ConvSpeed,Bin):\r\n    global Vacuum,encoder,Panic\r\n    FollDist=ConvSpeed*0.1\r\n    x=TargetData[0]\r\n    y=TargetData[1]+FollDist\r\n    xRPicked=(-0.8681)*x-0.4962*y\r\n    yRPicked=0.4962*x-0.8681*y\r\n    Placed=False # initialise so the function never returns an unbound name\r\n    \r\n    if abs(xRPicked)<600 and abs(yRPicked)<600: \r\n        if TargetData[2]==0:\r\n            zoff=-1332\r\n            Bin=AluBin\r\n        elif TargetData[2]==1:\r\n            zoff=-1376\r\n            Bin=CartonBin\r\n        elif TargetData[2]==2:\r\n            zoff=-1380+5\r\n            Bin=PetBin\r\n        theta=np.arctan2((Bin[1]-yRPicked),(Bin[0]-xRPicked))\r\n        a=np.array((xRPicked,yRPicked)) # must be a tuple, else \"data type not understood\"\r\n        b=np.array((Bin[0],Bin[1])) ## https://stackoverflow.com/questions/50973041/typeerror-data-type-not-understood-numpy-zeros\r\n        dist=np.linalg.norm(a-b)\r\n        \r\n        \r\n        \r\n        \r\n        \r\n        distMoveL=0.6*dist\r\n        if dist<400 or TargetData[2]==1:\r\n            distMoveL=0.8*dist\r\n        \r\n        xMoveL=xRPicked+np.cos(theta)*distMoveL\r\n        yMoveL=yRPicked+np.sin(theta)*distMoveL\r\n        \r\n        \r\n        y=((int(encoder)-TargetData[3]))+(0.05+0.1)*ConvSpeed+50 #+90\r\n        xR=(-0.8681)*x-0.4962*y\r\n        yR=0.4962*x-0.8681*y\r\n        \r\n        a = np.array((0,0))\r\n        b = np.array((xMoveL,yMoveL))\r\n        dist=np.linalg.norm(a-b)\r\n        if abs(dist)<750:\r\n            R.Pick_n_Toss([[xR,yR,zoff],[0.05,xMoveL,yMoveL,0.15]])\r\n            Placed=True\r\n        else:\r\n            print(xMoveL,yMoveL)\r\n        \r\n    return Bin,Placed\r\n\r\n\r\n#def readArduino():\r\n#    myData= s.readline()\r\n#    Vacuum=myData.decode('utf-8', errors='ignore')\r\n#    if int(Vacuum)>350:\r\n#        Vacuum=True\r\n#    else: Vacuum=False\r\n#    myData= s.readline()\r\n#    encoder=myData.decode('utf-8', errors='ignore')\r\n#    encoderTStamp=time.clock()\r\n#    return bool(Vacuum),int(encoder)*0.741806782 ,encoderTStamp\r\n#    \r\n#    \r\n#def ComputeConvSpeed(encoder1,t1,encoder2,t2,ConvSpeed):\r\n#    ConvSpeedRaw=(encoder2-encoder1)/(t2-t1)\r\n#    a=1/6\r\n#    ConvSpeed=(1-a)*ConvSpeed+a*ConvSpeedRaw # 1st-order low-pass filter\r\n#    \r\n#    return ConvSpeed \r\n#    \r\n\r\n#def Pick_n_Spin:\r\n        
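# (sketch, untested) the commented math below appears to offset the suction point along a fitted ellipse (EMAma) for a future spin-pick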
\r\n#angle2=180-angle\r\n#offset_elips=EMAma[1]*0.25 \r\n#Xpick=np.sin(angle2*0.0174532925)*offset_elips\r\n#Ypick=np.cos(angle2*0.0174532925)*offset_elips\r\n#Xpix=Exy[0]+Xpick\r\n#Ypix=Exy[1]+Ypick\r\n#cv2.putText(img,\".Exy\",(int(Exy[0]),int(Exy[1])),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255,5))\r\n#cv2.putText(img,\".\",(int(Xpix),int(Ypix)),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),5)\r\n# \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#def Target_Planner:\r\n#def Target_Toss_Planner:\r\n#def GUI:\r\n#def PlotStats:\r\n\r\n\r\n\r\n\r\nthread1a = Thread( target=thread1, args=(\"Thread-1\", ) )\r\nthread2a = Thread( target=thread2, args=(\"Thread-2\", ) )\r\nthread3a = Thread( target=thread3, args=(\"Thread-2\", ) )\r\n\r\nthread1a.start()\r\nthread2a.start() \r\nthread3a.start() \r\n \r\nthread1a.join()\r\nthread2a.join()\r\nthread3a.join()","sub_path":"RobotControllerPythonNode.py","file_name":"RobotControllerPythonNode.py","file_ext":"py","file_size_in_byte":9294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"597125633","text":"healthy = ['pizza', \"frozen custard\"]\nhealthy.append(\"apple crisp\")\n#print(healthy)\nstart=6\nname=\"Andra Astalus\"\nstart_of_last=6\n\npoem= \"Where am i?\"\n#print(name[start_of_last:start_of_last+2])\ntask= [\"Subscribe\"]\ndifferent=task\ndifferent[0]=\"hey\"\nprint(task)","sub_path":"hello2.py","file_name":"hello2.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"114363965","text":"#!/usr/bin/env python\n\nimport optparse\n\nimport feather.wsgi\n\n\nDEFAULT_HOST = \"\"\nDEFAULT_PORT = 9000\n\ndef wsgiapp(environ, start_response):\n start_response(\"200, OK\", [\n ('content-type', 'text/plain'),\n ('content-length', '13')])\n return [\"Hello, World!\"]\n\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser()\n parser.add_option(\"-H\", \"--host\", default=DEFAULT_HOST)\n parser.add_option(\"-P\", \"--port\", type=\"int\", default=DEFAULT_PORT)\n\n options, args = parser.parse_args()\n feather.wsgi.serve((options.host, options.port), wsgiapp, debug=True)\n","sub_path":"examples/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"627828135","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append(\"../\")\nimport config\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nMODEL = \"UNET\"\nLOSS_FUNCTION = \"SMILE\"\n\nmodel_folder = MODEL + '_' + LOSS_FUNCTION\n\nprint(\"LOAD DATA\")\ntrain_data = np.load(os.path.join(config.NUMPY_DIR, \"train_image.npy\"))\ntrain_label = np.load(os.path.join(config.NUMPY_DIR, \"train_label.npy\"))\nepoch_loss = []\n\n#%%\ndef crop_and_concat(x1,x2):\n x1_shape = tf.shape(x1)\n x2_shape = tf.shape(x2)\n offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]\n size = [-1, x2_shape[1], x2_shape[2], -1]\n x1_crop = tf.slice(x1, offsets, size)\n return tf.concat([x1_crop, x2], 3)\n#%%\nprint(\"BUILD MODEL\")\ntf.reset_default_graph()\nparameters = []\nwith tf.name_scope('data'):\n X = tf.placeholder(tf.float32, [None, config.patch_size, config.patch_size, config.channels], name=\"inputs\")\n Y = tf.placeholder(tf.int32, [None, config.patch_size, config.patch_size], name=\"labels\")\n\nwith tf.variable_scope(\"conv1\", 
reuse=tf.AUTO_REUSE):\n conv1_1_W = tf.get_variable(\"conv1_1_W\", [3, 3, config.channels, 64], initializer=tf.contrib.layers.xavier_initializer())\n conv1_1_b = tf.get_variable(\"conv1_1_b\", [64], initializer=tf.zeros_initializer())\n conv1_2_W = tf.get_variable(\"conv1_2_W\", [3, 3, 64, 64], initializer=tf.contrib.layers.xavier_initializer())\n conv1_2_b = tf.get_variable(\"conv1_2_b\", [64], initializer=tf.zeros_initializer())\nconv1_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(X, conv1_1_W, strides=[1,1,1,1], padding=\"SAME\"), conv1_1_b))\nconv1_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1_1, conv1_2_W, strides=[1,1,1,1], padding=\"SAME\"), conv1_2_b))\nparameters += [conv1_1_W, conv1_1_b, conv1_2_W, conv1_2_b]\n\nmaxpool1, maxpool1_ind = tf.nn.max_pool_with_argmax(conv1_2, ksize = [1,2,2,1], strides=[1,2,2,1], padding=\"SAME\")\nprint(\"maxpool1:\", maxpool1.shape)\n\nwith tf.variable_scope(\"conv2\", reuse=tf.AUTO_REUSE):\n conv2_1_W = tf.get_variable(\"conv2_1_W\", [3, 3, 64, 128], initializer=tf.contrib.layers.xavier_initializer())\n conv2_1_b = tf.get_variable(\"conv2_1_b\", [128], initializer=tf.zeros_initializer())\n conv2_2_W = tf.get_variable(\"conv2_2_W\", [3, 3, 128, 128], initializer=tf.contrib.layers.xavier_initializer())\n conv2_2_b = tf.get_variable(\"conv2_2_b\", [128], initializer=tf.zeros_initializer())\nconv2_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(maxpool1, conv2_1_W, strides=[1,1,1,1], padding=\"SAME\"), conv2_1_b))\nconv2_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2_1, conv2_2_W, strides=[1,1,1,1], padding=\"SAME\"), conv2_2_b))\nparameters += [conv2_1_W, conv2_1_b, conv2_2_W, conv2_2_b]\n\nmaxpool2, maxpool2_ind = tf.nn.max_pool_with_argmax(conv2_2, ksize = [1,2,2,1], strides=[1,2,2,1], padding=\"SAME\")\nprint(\"maxpool2:\", maxpool2.shape)\n\nwith tf.variable_scope(\"conv3\", reuse=tf.AUTO_REUSE):\n conv3_1_W = tf.get_variable(\"conv3_1_W\", [3, 3, 128, 256], initializer=tf.contrib.layers.xavier_initializer())\n conv3_1_b = tf.get_variable(\"conv3_1_b\", [256], initializer=tf.zeros_initializer())\n conv3_2_W = tf.get_variable(\"conv3_2_W\", [3, 3, 256, 256], initializer=tf.contrib.layers.xavier_initializer())\n conv3_2_b = tf.get_variable(\"conv3_2_b\", [256], initializer=tf.zeros_initializer())\nconv3_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(maxpool2, conv3_1_W, strides=[1,1,1,1], padding=\"SAME\"), conv3_1_b))\nconv3_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv3_1, conv3_2_W, strides=[1,1,1,1], padding=\"SAME\"), conv3_2_b))\nparameters += [conv3_1_W, conv3_1_b, conv3_2_W, conv3_2_b]\n\nmaxpool3, maxpool3_ind = tf.nn.max_pool_with_argmax(conv3_2, ksize = [1,2,2,1], strides=[1,2,2,1], padding=\"SAME\")\nprint(\"maxpool3:\", maxpool3.shape)\n\nwith tf.variable_scope(\"conv4\", reuse=tf.AUTO_REUSE):\n conv4_1_W = tf.get_variable(\"conv4_1_W\", [3, 3, 256, 512], initializer=tf.contrib.layers.xavier_initializer())\n conv4_1_b = tf.get_variable(\"conv4_1_b\", [512], initializer=tf.zeros_initializer())\n conv4_2_W = tf.get_variable(\"conv4_2_W\", [3, 3, 512, 512], initializer=tf.contrib.layers.xavier_initializer())\n conv4_2_b = tf.get_variable(\"conv4_2_b\", [512], initializer=tf.zeros_initializer())\nconv4_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(maxpool3, conv4_1_W, strides=[1,1,1,1], padding=\"SAME\"), conv4_1_b))\nconv4_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv4_1, conv4_2_W, strides=[1,1,1,1], padding=\"SAME\"), conv4_2_b))\nparameters += [conv4_1_W, conv4_1_b, conv4_2_W, conv4_2_b]\n\nmaxpool4, maxpool4_ind = 
tf.nn.max_pool_with_argmax(conv4_2, ksize = [1,2,2,1], strides=[1,2,2,1], padding=\"SAME\")\nprint(\"maxpool4:\", maxpool4.shape)\n\nwith tf.variable_scope(\"conv5\", reuse=tf.AUTO_REUSE):\n conv5_1_W = tf.get_variable(\"conv5_1_W\", [3, 3, 512, 1024], initializer=tf.contrib.layers.xavier_initializer())\n conv5_1_b = tf.get_variable(\"conv5_1_b\", [1024], initializer=tf.zeros_initializer())\n conv5_2_W = tf.get_variable(\"conv5_2_W\", [3, 3, 1024, 1024], initializer=tf.contrib.layers.xavier_initializer())\n conv5_2_b = tf.get_variable(\"conv5_2_b\", [1024], initializer=tf.zeros_initializer())\nconv5_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(maxpool4, conv5_1_W, strides=[1,1,1,1], padding=\"SAME\"), conv5_1_b))\nconv5_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv5_1, conv5_2_W, strides=[1,1,1,1], padding=\"SAME\"), conv5_2_b))\nparameters += [conv5_1_W, conv5_1_b, conv5_2_W, conv5_2_b]\nprint(\"conv5:\", conv5_2.shape)\n\nwith tf.variable_scope(\"upconv5\", reuse=tf.AUTO_REUSE):\n upconv5_1_W = tf.get_variable(\"upconv5_1_W\", [2, 2, 512, 1024], initializer=tf.contrib.layers.xavier_initializer())\n upconv5_1_b = tf.get_variable(\"upconv5_1_b\", [512], initializer=tf.zeros_initializer())\n upconv5_2_W = tf.get_variable(\"upconv5_2_W\", [3, 3, 1024, 512], initializer=tf.contrib.layers.xavier_initializer())\n upconv5_2_b = tf.get_variable(\"upconv5_2_b\", [512], initializer=tf.zeros_initializer())\n upconv5_3_W = tf.get_variable(\"upconv5_3_W\", [3, 3, 512, 512], initializer=tf.contrib.layers.xavier_initializer())\n upconv5_3_b = tf.get_variable(\"upconv5_3_b\", [512], initializer=tf.zeros_initializer())\nupconv5_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d_transpose(conv5_2, upconv5_1_W, tf.stack([tf.shape(conv5_2)[0], 2*conv5_2.shape[1].value, 2*conv5_2.shape[2].value, 512]), strides=[1,2,2,1], padding=\"SAME\"), upconv5_1_b))\nprint('conv4_2', conv4_2.shape)\nprint('upconv5_1', upconv5_1.shape)\nupconv5_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(crop_and_concat(conv4_2, upconv5_1), upconv5_2_W, strides=[1,1,1,1], padding=\"SAME\"), upconv5_2_b))\nupconv5_3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(upconv5_2, upconv5_3_W, strides=[1,1,1,1], padding=\"SAME\"), upconv5_3_b))\nparameters += [upconv5_1_W, upconv5_1_b, upconv5_2_W, upconv5_2_b, upconv5_3_W, upconv5_3_b]\nprint(\"upconv5:\", upconv5_3.shape)\n\nwith tf.variable_scope(\"upconv4\", reuse=tf.AUTO_REUSE):\n upconv4_1_W = tf.get_variable(\"upconv4_1_W\", [2, 2, 256, 512], initializer=tf.contrib.layers.xavier_initializer())\n upconv4_1_b = tf.get_variable(\"upconv4_1_b\", [256], initializer=tf.zeros_initializer())\n upconv4_2_W = tf.get_variable(\"upconv4_2_W\", [3, 3, 512, 256], initializer=tf.contrib.layers.xavier_initializer())\n upconv4_2_b = tf.get_variable(\"upconv4_2_b\", [256], initializer=tf.zeros_initializer())\n upconv4_3_W = tf.get_variable(\"upconv4_3_W\", [3, 3, 256, 256], initializer=tf.contrib.layers.xavier_initializer())\n upconv4_3_b = tf.get_variable(\"upconv4_3_b\", [256], initializer=tf.zeros_initializer())\nupconv4_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d_transpose(upconv5_3, upconv4_1_W, tf.stack([tf.shape(upconv5_3)[0], 2*upconv5_3.shape[1].value, 2*upconv5_3.shape[2].value, 256]), strides=[1,2,2,1], padding=\"SAME\"), upconv4_1_b))\nupconv4_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(crop_and_concat(conv3_2, upconv4_1), upconv4_2_W, strides=[1,1,1,1], padding=\"SAME\"), upconv4_2_b))\nupconv4_3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(upconv4_2, upconv4_3_W, strides=[1,1,1,1], padding=\"SAME\"), 
upconv4_3_b))\nparameters += [upconv4_1_W, upconv4_1_b, upconv4_2_W, upconv4_2_b, upconv4_3_W, upconv4_3_b]\nprint(\"upconv4:\", upconv4_3.shape)\n\nwith tf.variable_scope(\"upconv3\", reuse=tf.AUTO_REUSE):\n upconv3_1_W = tf.get_variable(\"upconv3_1_W\", [2, 2, 128, 256], initializer=tf.contrib.layers.xavier_initializer())\n upconv3_1_b = tf.get_variable(\"upconv3_1_b\", [128], initializer=tf.zeros_initializer())\n upconv3_2_W = tf.get_variable(\"upconv3_2_W\", [3, 3, 256, 128], initializer=tf.contrib.layers.xavier_initializer())\n upconv3_2_b = tf.get_variable(\"upconv3_2_b\", [128], initializer=tf.zeros_initializer())\n upconv3_3_W = tf.get_variable(\"upconv3_3_W\", [3, 3, 128, 128], initializer=tf.contrib.layers.xavier_initializer())\n upconv3_3_b = tf.get_variable(\"upconv3_3_b\", [128], initializer=tf.zeros_initializer())\nupconv3_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d_transpose(upconv4_3, upconv3_1_W, tf.stack([tf.shape(upconv4_3)[0], 2*upconv4_3.shape[1].value, 2*upconv4_3.shape[2].value, 128]), strides=[1,2,2,1], padding=\"SAME\"), upconv3_1_b))\nupconv3_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(crop_and_concat(conv2_2, upconv3_1), upconv3_2_W, strides=[1,1,1,1], padding=\"SAME\"), upconv3_2_b))\nupconv3_3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(upconv3_2, upconv3_3_W, strides=[1,1,1,1], padding=\"SAME\"), upconv3_3_b))\nparameters += [upconv3_1_W, upconv3_1_b, upconv3_2_W, upconv3_2_b, upconv3_3_W, upconv3_3_b]\nprint(\"upconv3:\", upconv3_3.shape)\n\nwith tf.variable_scope(\"upconv2\", reuse=tf.AUTO_REUSE):\n upconv2_1_W = tf.get_variable(\"upconv2_1_W\", [2, 2, 64, 128], initializer=tf.contrib.layers.xavier_initializer())\n upconv2_1_b = tf.get_variable(\"upconv2_1_b\", [64], initializer=tf.zeros_initializer())\n upconv2_2_W = tf.get_variable(\"upconv2_2_W\", [3, 3, 128, 64], initializer=tf.contrib.layers.xavier_initializer())\n upconv2_2_b = tf.get_variable(\"upconv2_2_b\", [64], initializer=tf.zeros_initializer())\n upconv2_3_W = tf.get_variable(\"upconv2_3_W\", [3, 3, 64, 64], initializer=tf.contrib.layers.xavier_initializer())\n upconv2_3_b = tf.get_variable(\"upconv2_3_b\", [64], initializer=tf.zeros_initializer())\nupconv2_1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d_transpose(upconv3_3, upconv2_1_W, tf.stack([tf.shape(upconv3_3)[0], 2*upconv3_3.shape[1].value, 2*upconv3_3.shape[2].value, 64]), strides=[1,2,2,1], padding=\"SAME\"), upconv2_1_b))\nupconv2_2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(crop_and_concat(conv1_2, upconv2_1), upconv2_2_W, strides=[1,1,1,1], padding=\"SAME\"), upconv2_2_b))\nupconv2_3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(upconv2_2, upconv2_3_W, strides=[1,1,1,1], padding=\"SAME\"), upconv2_3_b))\nparameters += [upconv2_1_W, upconv2_1_b, upconv2_2_W, upconv2_2_b, upconv2_3_W, upconv2_3_b]\nprint(\"upconv2:\", upconv2_3.shape)\n\nwith tf.variable_scope(\"Output\", reuse=tf.AUTO_REUSE):\n Output_W = tf.get_variable(\"Output_W\", [1, 1, 64, config.classes], initializer=tf.contrib.layers.xavier_initializer())\n Output_b = tf.get_variable(\"Output_b\", [config.classes], initializer=tf.zeros_initializer())\nZ = tf.nn.bias_add(tf.nn.conv2d(upconv2_3, Output_W, strides=[1, 1, 1, 1], padding=\"SAME\"), Output_b)\nparameters += [Output_W, Output_b]\nprint(\"Z:\", Z.shape)\n\nwith tf.name_scope(\"loss_function\"):\n intermediate = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=Z)\n ones_mask = tf.ones((config.FCN_batch_size, Y.shape[1], Y.shape[2]), tf.int32)\n loss_mask = tf.where(Y>0, ones_mask, Y)\n pixel_wise_loss = 
intermediate*tf.cast(loss_mask, tf.float32)\n    loss = tf.reduce_mean(pixel_wise_loss)\n    # loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=Y, logits=Z))\ntf.summary.scalar('loss', loss)\n\nglobal_step = tf.Variable(0, name='global_step', trainable=False)\nwith tf.variable_scope(\"optimizer\", reuse=tf.AUTO_REUSE):\n    optimizer = tf.train.AdamOptimizer(config.UNET_learning_rate).minimize(loss, global_step)\n\nprint(\"TRAIN MODEL\")\npre_saver = tf.train.Saver()\nsaver = tf.train.Saver()\nmerged_summary_op = tf.summary.merge_all()\nwith tf.Session() as sess:\n    summary_writer = tf.summary.FileWriter(os.path.join(config.MODEL_DIR, model_folder), sess.graph)\n    sess.run(tf.global_variables_initializer())\n    sess.run(tf.local_variables_initializer())\n\n    n_batches = train_data.shape[0]//config.UNET_batch_size\n    for i in range(config.UNET_n_epochs):\n        total_loss = 0\n        if i == 0:\n            try:\n                pre_saver.restore(sess, os.path.join(config.MODEL_DIR, model_folder, \"model.ckpt\"))\n            except:\n                pass # no checkpoint to resume from; train from scratch\n        \n        for batch in range(n_batches):\n            data_batch = train_data[batch*config.UNET_batch_size:(batch+1)*config.UNET_batch_size, :, :, :]\n            label_batch = train_label[batch*config.UNET_batch_size:(batch+1)*config.UNET_batch_size, :, :]\n            feed_dict = {X: data_batch, Y: label_batch}\n            summary_str, _, loss_batch = sess.run([merged_summary_op, optimizer, loss], feed_dict=feed_dict)\n            summary_writer.add_summary(summary_str, global_step=global_step.eval())\n            total_loss += loss_batch\n            if not batch%10:\n                print('Batch {0}: Loss: {1}'.format(batch, loss_batch))\n        print('\\n Epoch {0}: Loss: {1}\\n'.format(i, total_loss/n_batches))\n        epoch_loss.append(total_loss/n_batches)\n\n        if i % 5 == 0:\n            ckpt_idx = i+1 # avoid shadowing the builtin iter()\n            save_path = saver.save(sess, os.path.join(config.MODEL_DIR, model_folder, \"model\"+str(ckpt_idx)+\".ckpt\"))\n            epoch_loss_npy = np.array(epoch_loss)\n            np.save(os.path.join(config.MODEL_DIR, model_folder, \"loss_\"+str(ckpt_idx)), epoch_loss_npy)\n\n    summary_writer.close()\n    save_path = saver.save(sess, os.path.join(config.MODEL_DIR, model_folder, \"model_\"+str(config.UNET_n_epochs)+\".ckpt\"))\n    np.save(os.path.join(config.MODEL_DIR, model_folder, \"loss_\"+str(config.UNET_n_epochs)), epoch_loss)\n","sub_path":"UNET/train_model_smile.py","file_name":"train_model_smile.py","file_ext":"py","file_size_in_byte":13919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"163015821","text":"from celery import Celery\nfrom celery.signals import task_postrun\nimport time\nimport sys\nimport json\nimport socketio\nimport redis\n\n\nmanager = socketio.RedisManager('redis://redis_host:6379/0')\n\n\napplication = Celery('CeleryAppName', broker='redis://redis_host:6379/0')\napplication.conf.backend = 'redis://redis_host:6379/0'\napplication.conf.update(\n    task_serializer='json',\n    accept_content=['json'],\n    result_serializer='json',\n    timezone='Europe/Moscow',\n    enable_utc=True,\n)\n\n\n@application.task(bind=True, name='creator')\ndef some_work(self, some_data: dict) -> dict:\n    res = some_data\n    connection = redis.Redis(host='redis_host', port=6379, db=1)\n    print(f'Sleeping for {res.get(\"time\")}', file=sys.stdout, flush=True)\n    for x in range(int(res.get('time') / 2)):\n        res['percent'] = res.get('percent') + (100 / (int(res.get('time') / 2)))\n        print(f'Emitting message')\n        manager.emit('worker_message', res, room='admin')\n        connection.set(res.get('task_id'), json.dumps(res))\n        time.sleep(2)\n    return res\n\n\n@task_postrun.connect()\ndef on_task_postrun(retval=None, 
task_id=None, task=None, args=None, **kwargs) -> None:\n print(f'Finishing with task {task}')\n connection = redis.Redis(host='redis_host', port=6379, db=1)\n retval['percent'] = 100\n retval['ready'] = True\n retval['result'] = retval['description'][::-1]\n connection.set(retval.get('task_id'), json.dumps(retval))\n print(f'Redis has been updated')\n manager.emit('worker_message', retval, room='admin')\n print(f'User has been informed')\n","sub_path":"backend/workers/asgi_socket_workers.py","file_name":"asgi_socket_workers.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"579255393","text":"# -*- coding: utf-8 -*-\nu\"\"\"\nCopyright 2016 Telefónica Investigación y Desarrollo, S.A.U.\nThis file is part of Toolium.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\n\nimport mock\nimport pytest\n\nfrom toolium.behave.environment import get_jira_key_from_scenario, before_all\nfrom toolium.config_files import ConfigFiles\n\ntags = (\n ([\"jira('PROJECT-32')\"], 'PROJECT-32'),\n ([\"jira=PROJECT-32\"], 'PROJECT-32'),\n ([\"jira(PROJECT-32)\"], 'PROJECT-32'),\n ([\"jira='PROJECT-32'\"], 'PROJECT-32'),\n ([\"jiraPROJECT-32\"], 'PROJECT-32'),\n ([\"jira\"], None),\n ([\"PROJECT-32\"], None),\n (['slow', \"jira('PROJECT-32')\", 'critical'], 'PROJECT-32'),\n (['slow', \"PROJECT-32\", 'critical'], None),\n (['slow', \"jira('PROJECT-32')\", \"jira('PROJECT-33')\"], 'PROJECT-32'),\n)\n\n\n@pytest.mark.parametrize(\"tag_list, jira_key\", tags)\ndef test_get_jira_key_from_scenario(tag_list, jira_key):\n scenario = mock.Mock()\n scenario.tags = tag_list\n\n # Extract Jira key and compare with expected key\n assert jira_key == get_jira_key_from_scenario(scenario)\n\n\n@mock.patch('toolium.behave.environment.create_and_configure_wrapper')\ndef test_before_all(create_and_configure_wrapper):\n # Create context mock\n context = mock.MagicMock()\n context.config.userdata.get.return_value = None\n context.config_files = ConfigFiles()\n\n before_all(context)\n\n # Check that configuration folder is the same as environment folder\n expected_config_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')\n assert context.config_files.config_directory == expected_config_directory\n assert context.config_files.config_properties_filenames is None\n assert context.config_files.config_log_filename is None\n\n\nproperties = (\n ('env'),\n ('Config_environment'),\n)\n\n\n@pytest.mark.parametrize(\"property_name\", properties)\n@mock.patch('toolium.behave.environment.create_and_configure_wrapper')\ndef test_before_all_config_environment(create_and_configure_wrapper, property_name):\n # Create context mock\n context = mock.MagicMock()\n context.config.userdata.get.side_effect = lambda x: 'os' if x == property_name else None\n context.config_files = ConfigFiles()\n\n before_all(context)\n\n # Check that configuration folder is the same as environment folder and property 'Config_environment' is configured\n 
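# the 'os-properties.cfg' filenames asserted below follow from the mocked userdata returning 'os'\n    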
expected_config_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'conf')\n assert context.config_files.config_directory == expected_config_directory\n assert context.config_files.config_properties_filenames == 'properties.cfg;os-properties.cfg;local-os-properties.cfg'\n assert context.config_files.config_log_filename is None\n assert os.environ['Config_environment'] == 'os'\n del os.environ[\"Config_environment\"]\n","sub_path":"toolium/test/behave/test_environment.py","file_name":"test_environment.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"336744044","text":"#!/usr/bin/env python\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: FGDB driver testing.\n# Author: Even Rouault \n#\n###############################################################################\n# Copyright (c) 2011, Even Rouault \n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nimport os\nimport sys\nimport string\nimport shutil\n\nsys.path.append( '../pymod' )\n\nimport gdaltest\nimport ogrtest\nimport ogr\nimport osr\n\n###############################################################################\n# Test if driver is available\n\ndef ogr_fgdb_init():\n\n ogrtest.fgdb_drv = None\n\n try:\n ogrtest.fgdb_drv = ogr.GetDriverByName('FileGDB')\n except:\n pass\n\n if ogrtest.fgdb_drv is None:\n return 'skip'\n\n try:\n shutil.rmtree(\"tmp/test.gdb\")\n except:\n pass\n\n return 'success'\n\n###############################################################################\n# Write and read back various geometry types\n\ndef ogr_fgdb_1():\n if ogrtest.fgdb_drv is None:\n return 'skip'\n\n srs = osr.SpatialReference()\n srs.SetFromUserInput(\"WGS84\")\n\n ds = ogrtest.fgdb_drv.CreateDataSource(\"tmp/test.gdb\")\n\n datalist = [ [ \"point\", ogr.wkbPoint, \"POINT (1 2)\" ],\n [ \"multipoint\", ogr.wkbMultiPoint, \"MULTIPOINT (1 2,3 4)\" ],\n [ \"linestring\", ogr.wkbLineString, \"LINESTRING (1 2,3 4)\", \"MULTILINESTRING ((1 2,3 4))\" ],\n [ \"multilinestring\", ogr.wkbMultiLineString, \"MULTILINESTRING ((1 2,3 4))\" ],\n [ \"polygon\", ogr.wkbPolygon, \"POLYGON ((0 0,0 1,1 1,1 0,0 0))\", \"MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)))\" ],\n [ \"multipolygon\", ogr.wkbMultiPolygon, \"MULTIPOLYGON (((0 0,0 1,1 1,1 0,0 0)))\" ],\n [ \"point25D\", ogr.wkbPoint25D, \"POINT (1 2 3)\" ],\n [ \"multipoint25D\", ogr.wkbMultiPoint25D, \"MULTIPOINT (1 2 -10,3 4 -20)\" ],\n [ \"linestring25D\", ogr.wkbLineString25D, \"LINESTRING (1 2 -10,3 4 -20)\", \"MULTILINESTRING ((1 2 -10,3 4 -20))\" ],\n [ \"multilinestring25D\", ogr.wkbMultiLineString25D, \"MULTILINESTRING ((1 2 -10,3 4 -20))\" ],\n [ \"polygon25D\", ogr.wkbPolygon25D, \"POLYGON ((0 0 -10,0 1 -10,1 1 -10,1 0 -10,0 0 -10))\", \"MULTIPOLYGON (((0 0 -10,0 1 -10,1 1 -10,1 0 -10,0 0 -10)))\" ],\n [ \"multipolygon25D\", ogr.wkbMultiPolygon25D, \"MULTIPOLYGON (((0 0 -10,0 1 -10,1 1 -10,1 0 -10,0 0 -10)))\" ],\n ]\n\n for data in datalist:\n lyr = ds.CreateLayer(data[0], geom_type = data[1], srs = srs)\n lyr.CreateField(ogr.FieldDefn(\"str\", ogr.OFTString))\n lyr.CreateField(ogr.FieldDefn(\"int\", ogr.OFTInteger))\n lyr.CreateField(ogr.FieldDefn(\"real\", ogr.OFTReal))\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetGeometry(ogr.CreateGeometryFromWkt(data[2]))\n feat.SetField(\"str\", \"foo_\\xc3\\xa9\")\n feat.SetField(\"int\", 123)\n feat.SetField(\"real\", 4.56)\n lyr.CreateFeature(feat)\n\n for data in datalist:\n lyr = ds.GetLayerByName(data[0])\n if lyr.GetSpatialRef().IsSame(srs) != 1:\n print(lyr.GetSpatialRef())\n return 'fail'\n feat = lyr.GetNextFeature()\n try:\n expected_wkt = data[3]\n except:\n expected_wkt = data[2]\n if feat.GetGeometryRef().ExportToWkt() != expected_wkt:\n feat.DumpReadable()\n return 'fail'\n\n ds = None\n\n return 'success'\n\n###############################################################################\n# Run test_ogrsf\n\ndef ogr_fgdb_2():\n if ogrtest.fgdb_drv is None:\n return 'skip'\n\n import test_cli_utilities\n if test_cli_utilities.get_test_ogrsf_path() is None:\n return 'skip'\n\n ret = 
gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' -ro tmp/test.gdb')\n\n if ret.find('INFO') == -1 or ret.find('ERROR') != -1:\n print(ret)\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Run ogr2ogr\n\ndef ogr_fgdb_3():\n if ogrtest.fgdb_drv is None:\n return 'skip'\n\n import test_cli_utilities\n if test_cli_utilities.get_ogr2ogr_path() is None:\n return 'skip'\n if test_cli_utilities.get_test_ogrsf_path() is None:\n return 'skip'\n\n try:\n shutil.rmtree(\"tmp/poly.gdb\")\n except:\n pass\n\n gdaltest.runexternal(test_cli_utilities.get_ogr2ogr_path() + ' -f filegdb tmp/poly.gdb data/poly.shp -nlt MULTIPOLYGON -a_srs None')\n\n ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' tmp/poly.gdb')\n #print ret\n\n if ret.find('INFO') == -1 or ret.find('ERROR') != -1:\n print(ret)\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test delete layer\n\ndef ogr_fgdb_4():\n if ogrtest.fgdb_drv is None:\n return 'skip'\n\n for j in range(2):\n\n # Create a layer\n ds = ogr.Open(\"tmp/test.gdb\", update = 1)\n srs = osr.SpatialReference()\n srs.SetFromUserInput(\"WGS84\")\n lyr = ds.CreateLayer(\"layer_to_remove\", geom_type = ogr.wkbPoint, srs = srs)\n lyr.CreateField(ogr.FieldDefn(\"str\", ogr.OFTString))\n feat = ogr.Feature(lyr.GetLayerDefn())\n feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(2 49)'))\n feat.SetField(\"str\", \"foo\")\n feat = None\n lyr = None\n\n if j == 1:\n ds = None\n ds = ogr.Open(\"tmp/test.gdb\", update = 1)\n\n # Delete it\n for i in range(ds.GetLayerCount()):\n if ds.GetLayer(i).GetName() == 'layer_to_remove':\n ds.DeleteLayer(i)\n break\n\n # Check it no longer exists\n lyr = ds.GetLayerByName('layer_to_remove')\n ds = None\n\n if lyr is not None:\n gdaltest.post_reason('failed at iteration %d' % j)\n return 'fail'\n\n return 'success'\n\n###############################################################################\n# Test DeleteDataSource()\n\ndef ogr_fgdb_5():\n if ogrtest.fgdb_drv is None:\n return 'skip'\n\n if ogrtest.fgdb_drv.DeleteDataSource(\"tmp/test.gdb\") != 0:\n gdaltest.post_reason('DeleteDataSource() failed')\n return 'fail'\n\n try:\n os.stat(\"tmp/test.gdb\")\n gdaltest.post_reason(\"tmp/test.gdb still existing\")\n return 'fail'\n except:\n pass\n\n return 'success'\n\n###############################################################################\n# Cleanup\n\ndef ogr_fgdb_cleanup():\n if ogrtest.fgdb_drv is None:\n return 'skip'\n\n try:\n shutil.rmtree(\"tmp/test.gdb\")\n except:\n pass\n\n try:\n shutil.rmtree(\"tmp/poly.gdb\")\n except:\n pass\n\n return 'success'\n\ngdaltest_list = [\n ogr_fgdb_init,\n ogr_fgdb_1,\n ogr_fgdb_2,\n ogr_fgdb_3,\n ogr_fgdb_4,\n ogr_fgdb_5,\n ogr_fgdb_cleanup,\n ]\n\nif __name__ == '__main__':\n\n gdaltest.setup_run( 'ogr_fgdb' )\n\n gdaltest.run_tests( gdaltest_list )\n\n gdaltest.summarize()\n\n\n\n","sub_path":"autotest/ogr/ogr_fgdb.py","file_name":"ogr_fgdb.py","file_ext":"py","file_size_in_byte":8331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"580678184","text":"from constants import *\nimport numpy as np\n\ndef percolation_noliquid(self, iii):\n\n\t'''\n\tThis is a simple (and very wrong, probably) percolation scheme that does not\n\tallow any liquid water to exist. Any water that can be refrozed by the \n\tcold content of the firn will be. 
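Refreezing is applied immediately, within the current time step. 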
There is no limit on how much of the\n\tporosity can be filled with that water (which will refreeze). Any water that\n\tis not refrozen is considered runoff.\n\t'''\n\t\n\tporosity \t\t\t= 1 - self.rho/RHO_I # porosity\n\tdz_old \t\t\t\t= self.dz\n\tporespace_vol \t\t= porosity * self.dz #porespace in meters of each box\n\tmelt_volume_IE \t= self.snowmeltSec[iii] * S_PER_YEAR #meters\n\tmelt_volume_WE\t\t= melt_volume_IE * RHO_I_MGM #meters\n\tmelt_mass\t\t\t= melt_volume_WE * 1000. #kg\n\theat_to_freeze \t\t= melt_mass * LF_I #amount of heat needed to refreeze the melt (J)\n\n\tif (self.mass_sum==melt_mass).any():\n\t\texactmass = True\n\telse:\n\t\texactmass = False\n\n\tind1a \t\t\t\t= np.where(self.mass_sum<=melt_mass)[0] # indices of boxes that will be melted away\n\tnum_boxes_melted \t= len(ind1a)+1 #number of boxes that melt away, include the box that is partially melted\n\tind1 \t\t\t\t= np.where(self.mass_sum>melt_mass)[0][0] \t# index which will become the new surface\n\n\t### pm is the partial melt (the box/volume that has a portion melted away)\n\tpm_mass\t\t\t\t= self.mass_sum[ind1] - melt_mass \t# the remaining mass of the PM box\n\tpm_dz \t\t\t\t= pm_mass / self.rho[ind1] #remaining thickness\n\tpm_porespace \t\t= (1 - self.rho[ind1]/RHO_I) * pm_dz #porespace in the PM box\n\tpm_rho \t\t\t\t= self.rho[ind1] #density of the PM box\n\n\tcold_content_0 \t\t= CP_I * self.mass * (T_MELT - self.Tz) #cold content of each box, i.e. how much heat to bring it to 273K\n\tcold_content_0_sum \t= cold_content_0.cumsum(axis=0)\n\tcold_content \t\t= cold_content_0[ind1:] #just the boxes that don't melt away\n\tcold_content[0] \t= CP_I * pm_mass * (T_MELT - self.Tz[ind1]) # the partial melt box has its cold content reassigned.\n\tcold_content_sum \t= cold_content.cumsum(axis=0)\n\n\tind2_rel \t\t\t= np.where(cold_content_sum>heat_to_freeze)[0][0] #freeze horizon index (where the wetting front freezes), index relative to ind1\n\tind2 \t\t\t\t= ind2_rel + ind1 #absolute index on real grid (we have not removed the melted boxes yet)\n\n\tif (self.rho[ind1:ind2+1]>830.0).any(): #if there is an ice lens somewhere between the new surface and where freezing should occur\n\t\tind2_rel \t\t\t\t= np.where(self.rho[ind1:]>=830.0)[0][0]\n\t\tind2 \t\t\t\t\t= ind2_rel + ind1 #the recalculated freezing front (water cannot go past this level)\n\t\tcold_content_lens \t\t= cold_content[0:ind2_rel].sum() #cold content that is available in the space between the surface and the lens \t\n\t\trefreeze_mass \t\t\t= cold_content_lens / LF_I # this is how much should be able to refreeze in the available pore space above the ice lens.\n\t\tmelt_volume_WE \t\t\t= refreeze_mass / RHO_W_KGM\n\t\tmelt_volume_IE \t\t\t= melt_volume_WE / RHO_I_MGM #the volume of melt that is not runoff\n\t\trunoff_volume_duetolens = (melt_mass - refreeze_mass) / RHO_W_KGM # w.e. volume of the melt that runs off\n\n\telse:\n\t\trunoff_volume_duetolens\t= 0.0\n\n\tpore_indices \t\t= np.arange(ind1,ind2+1) # indices of the boxes that are available to fill with water\n\tpore_indices_flip \t= np.flipud(pore_indices)\n\tporespace_vol[ind1] = pm_porespace\n\tporespace_0_sum\t\t= porespace_vol.cumsum(axis=0)\n\tporespace \t\t\t= porespace_vol[ind1+1:ind2+1] #space available for the water\n\tporespace_sum \t\t= porespace.cumsum(axis=0)\n\tporespace_sum_flip \t= (np.flipud(porespace)).cumsum(axis=0)\n\tavailable_space \t= porespace_0_sum[ind2]-porespace_0_sum[ind1]\n\n\tif available_space < melt_volume_IE: # melt volume has already been recalculated based on how much can freeze with the 
cold content\n\n\t\trunoff_volume_duetolimitedporespace = (melt_volume_IE - porespace_sum) * RHO_I_MGM\n\n\t\tself.rho[ind1:ind2+1] \t= 870.0 #fill all of the boxes with water.\n\t\tself.Tz[ind1:ind2+1] \t= T_MELT\n\n\t\t# split up last box into several\n\t\tdivider \t\t= num_boxes_melted\n\t\tself.rho \t\t= np.concatenate((self.rho[ind1:-1] , self.rho[-1]*np.ones(num_boxes_melted)))\n\t\tself.age \t\t= np.concatenate((self.age[ind1:-1] , self.age[-1]*np.ones(num_boxes_melted)))\n\t\tself.dz \t\t= np.concatenate((self.dz[ind1:-1] , self.dz[-1]/divider*np.ones(num_boxes_melted)))\n\t\tself.dz[0] \t\t= pm_dz\n\t\tself.dzn \t\t= np.concatenate((np.zeros(num_boxes_melted), self.dz[1:])) #this is not quite right because is assumes compaction for the pm box is zero.\n\t\tself.dzn \t\t= self.dzn[0:self.compboxes]\n\t\tself.Tz \t\t= np.concatenate((self.Tz[ind1:-1],self.Tz[-1]*np.ones(num_boxes_melted)))\n\t\tself.bdot_mean \t= np.concatenate((self.bdot_mean[ind1:-1],self.bdot_mean[-1]*np.ones(num_boxes_melted)))\n\t\tself.z \t\t\t= self.dz.cumsum(axis = 0)\n\t\tself.z \t\t\t= np.concatenate(([0], self.z[:-1]))\n\t\tself.mass \t\t= self.rho*self.dz\n\t\n\telif available_space == 0.0: #the top layer is an ice lens, so the melt runs off\n\n\t\t# split up last box into several\n\t\tdivider \t\t= num_boxes_melted # ind3 should be removed and replaced with 2 new boxes.\n\t\tself.rho \t\t= np.concatenate((self.rho[ind1:-1] , self.rho[-1]*np.ones(num_boxes_melted)))\n\t\tself.age \t\t= np.concatenate((self.age[ind1:-1] , self.age[-1]*np.ones(num_boxes_melted)))\n\t\tself.dz \t\t= np.concatenate((self.dz[ind1:-1] , self.dz[-1]/divider*np.ones(num_boxes_melted)))\n\t\tself.dz[0] \t\t= pm_dz\n\t\tself.dzn \t\t= np.concatenate((np.zeros(num_boxes_melted), self.dz[1:])) #this is not quite right because is assumes compaction for the pm box is zero.\n\t\tself.dzn \t\t= self.dzn[0:self.compboxes]\n\t\tself.Tz \t\t= np.concatenate((self.Tz[ind1:-1],self.Tz[-1]*np.ones(num_boxes_melted)))\n\t\tself.bdot_mean \t= np.concatenate((self.bdot_mean[ind1:-1],self.bdot_mean[-1]*np.ones(num_boxes_melted)))\n\t\tself.z \t\t\t= self.dz.cumsum(axis = 0)\n\t\tself.z \t\t\t= np.concatenate(([0], self.z[:-1]))\n\t\tself.mass \t\t= self.rho*self.dz\n\n\telse:\n\t\trunoff_volume_duetolimitedporespace = 0\n\t\tind3a \t\t\t\t= np.where(porespace_sum_flip>melt_volume_IE)[0][0]\n\t\tind3 \t\t\t\t= ind2 - ind3a #the index of the node that is partially filled with water\n\t\tpartial_volume \t\t= melt_volume_IE - np.sum(porespace_vol[ind3+1:ind2+1]) # pore space filled in the box that is partially filled\n\t\tleftover_porespace \t= porespace_vol[ind3]-partial_volume #open pore space in the the partially-filled box\n\n\t\tnew_node_1_rho \t\t= self.rho[ind3] #split up the partial box into 2 parts\n\t\tnew_node_2_rho \t\t= 870.0\n\t\tnew_node_1_dz \t\t= leftover_porespace / (1 - self.rho[ind3]/RHO_I)\n\t\tnew_node_2_dz \t\t= self.dz[ind3] - new_node_1_dz\n\n\t\tself.rho[ind3+1:ind2+1] = 870.0\n\t\tself.Tz[ind1:ind2+1] \t= T_MELT\n\n\t\t# split up last box into several\n\t\tdivider \t\t= num_boxes_melted # ind3 should be removed and replaced with 2 new boxes.\n\t\tself.rho \t\t= np.concatenate((self.rho[ind1:ind3] , [new_node_1_rho,new_node_2_rho] , self.rho[ind3+1:-1] , self.rho[-1]*np.ones(num_boxes_melted-1)))\n\t\tself.age \t\t= np.concatenate((self.age[ind1:ind3] , [self.age[ind3],self.age[ind3]] , self.age[ind3+1:-1] , self.age[-1]*np.ones(num_boxes_melted-1)))\n\t\tdzhold \t\t\t= self.dz[ind1+1:ind3]\n\t\tdzhold2 \t\t= 
self.dz[ind3+1:-1]\n\t\tself.dz \t\t= np.concatenate((self.dz[ind1:ind3] , [new_node_1_dz,new_node_2_dz] , self.dz[ind3+1:-1] ,self.dz[-1]/divider*np.ones(num_boxes_melted-1)))\n\t\tself.dzn \t\t= np.concatenate((np.zeros(num_boxes_melted), np.append(dzhold, new_node_1_dz+new_node_2_dz), dzhold2))\n\t\tself.dzn \t\t= self.dzn[0:self.compboxes]\n\t\tself.dz[0] \t\t= pm_dz\n\t\tself.Tz \t\t= np.concatenate((self.Tz[ind1:ind3] , [self.Tz[ind3],self.Tz[ind3]] , self.Tz[ind3+1:-1] , self.Tz[-1]*np.ones(num_boxes_melted-1)))\n\t\tself.bdot_mean \t= np.concatenate((self.bdot_mean[ind1:ind3] , [self.bdot_mean[ind3],self.bdot_mean[ind3]] , self.bdot_mean[ind3+1:-1] , self.bdot_mean[-1]*np.ones(num_boxes_melted-1)))\n\t\tself.z \t\t\t= self.dz.cumsum(axis = 0)\n\t\tself.z \t\t\t= np.concatenate(([0], self.z[:-1]))\n\t\tself.mass \t\t= self.rho*self.dz\n\n\treturn self.rho, self.age, self.dz, self.Tz, self.z, self.mass, self.dzn\n\ndef percolation_bucket(self, iii):\n\n\t'''\n\tThis is the bucket scheme that allows liquid water to persist in the firn.\n\tIt includes consideration of irreducible liquid water content (LWC) and maximum\n\tLWC. Water that encounters a slab of a certain density (impermeable_rho) will\n\tnot percolate through.\n\n\tLWC is in volume (m^3), and since we are working in one dimension we assume\n\tthat \n\t'''\n\n\tmaxpore_f \t\t\t\t= 2.0 \t# factor by which the maximum filled porespace can exceed the irreducible saturation.\n\timpermeable_rho\t\t\t= 725. \t# impermeable lens density.\n\n\tif np.any(self.LWC<0):\n\t\tprint('ERROR: negative LWC')\n\t\tprint('(model will continue to run)')\n\n\tmelt_volume_IE \t\t= self.snowmeltSec[iii] * S_PER_YEAR \t# meters\n\tmelt_volume_WE\t\t\t= melt_volume_IE * RHO_I_MGM \t\t\t# meters\n\tmelt_mass\t\t\t\t= melt_volume_WE * 1000. 
\t\t\t\t# kg\n\theat_to_freeze \t\t\t= melt_mass * LF_I \t\t\t\t\t\t# amount of heat needed to refreeze the melt (J)\n\tind1a \t\t\t\t\t= np.where(self.mass_sum <= melt_mass)[0] \t# indicies of boxes that will be melted away\n\tnum_boxes_melted \t\t= len(ind1a)+1 \t\t\t\t\t\t\t\t# number of boxes that melt away, include the box that is partially melted\n\tind1 \t\t\t\t\t= np.where(self.mass_sum > melt_mass)[0][0] # index which will become the new surface\n\n\t### pm is the partial melt (the model volume that has a portion melted away)\n\tpm_mass \t\t\t\t= self.mass_sum[ind1] - melt_mass \t\t# the remaining mass of the PM box\n\tpm_dz \t\t\t\t\t= pm_mass / self.rho[ind1] \t\t\t\t# remaining thickness\n\tpm_porespace \t\t\t= (1 - self.rho[ind1]/RHO_I) * pm_dz \t# porespace in the PM box\n\tpm_rho \t\t\t\t\t= self.rho[ind1] \t\t\t\t\t\t# density of the PM box\n\tpm_lwc\t\t\t\t\t= self.LWC[ind1]/self.dz[ind1] * pm_dz\t# LWC of the PM box\n\n\tmelt_boxes_LWC_vol \t= np.sum(self.LWC[0:ind1+1]) - pm_lwc #include the water mass from the boxes that melt (currently does not include from the partial melt box)\n\tmelt_boxes_LWC_mass \t= melt_boxes_LWC_vol * RHO_W_KGM\n\tmelt_mass_a \t\t\t= melt_mass + melt_boxes_LWC_mass\n\tmelt_vol_a \t\t\t\t= melt_mass_a / RHO_W_KGM\n\n\t###################################\n\t### Regrid after melt\n\t### Melted boxes are accomodated by just adding more (new) boxes at the bottom of the column\n\t### Beware of this if you are not modeling to firn-ice transition depth.\n\tdivider \t\t\t\t= num_boxes_melted\n\tself.rho \t\t\t\t= np.concatenate((self.rho[ind1:-1] , self.rho[-1]*np.ones(num_boxes_melted)))\n\tself.LWC \t\t\t\t= np.concatenate((self.LWC[ind1:-1] , self.LWC[-1]*np.ones(num_boxes_melted)))\n\tself.LWC[0] \t\t\t= pm_lwc\n\tself.age \t\t\t\t= np.concatenate((self.age[ind1:-1] , self.age[-1]*np.ones(num_boxes_melted)))\n\t# self.dz \t\t\t\t= np.concatenate((self.dz[ind1:-1] , self.dz[-1]/divider*np.ones(num_boxes_melted))) # this splits the last box into many.\n\tself.dz \t\t\t\t= np.concatenate((self.dz[ind1:-1] , self.dz[-1]*np.ones(num_boxes_melted))) # this adds new boxes at the bottom.\n\tself.dz[0] \t\t\t\t= pm_dz\n\tself.Dcon \t\t\t\t= np.concatenate((self.Dcon[ind1:-1] , self.Dcon[-1]*np.ones(num_boxes_melted)))\n\tself.dzn \t\t\t\t= np.concatenate((np.zeros(num_boxes_melted), self.dz[1:])) #this is not quite right because is assumes compaction for the pm box is zero.\n\tself.dzn \t\t\t\t= self.dzn[0:self.compboxes]\n\tself.Tz \t\t\t\t= np.concatenate((self.Tz[ind1:-1] , self.Tz[-1]*np.ones(num_boxes_melted)))\n\tself.bdot_mean \t\t\t= np.concatenate((self.bdot_mean[ind1:-1] , self.bdot_mean[-1]*np.ones(num_boxes_melted)))\n\tself.z \t\t\t\t\t= self.dz.cumsum(axis = 0)\n\tself.z \t\t\t\t\t= np.concatenate(([0] , self.z[:-1]))\n\tself.mass \t\t\t\t= self.rho * self.dz\n\t###################################\n\n\t##########################################\n\t### now working all with the new grid ####\n\t##########################################\n\tporosity \t\t\t\t= 1 - self.rho / RHO_I \t\t# porosity (unitless)\n\tporespace_vol \t\t\t= porosity * self.dz \t\t# pore space volume (meters) of each box - volume of air + water\n\tporespace_air\t\t\t= porespace_vol - self.LWC \t# pore space that is filled with air (meters)\n\n\tcold_content\t\t\t= CP_I * self.mass * (T_MELT - self.Tz) # cold content of each box, i.e. 
how much heat to bring it to 273K (J)\n\tcold_content_sum \t\t= cold_content.cumsum(axis=0)\n\trefreeze_mass_pot \t\t= cold_content / LF_I \t\t\t\t\t# how much mass of the meltwater could be refrozen due to cold content\n\trefreeze_mass_pot_sum \t= refreeze_mass_pot.cumsum(axis=0) \n\n\t### calculate what the values will be after refreeze happens (pot stands for potential)\n\trho_pot\t\t\t\t\t= (self.mass + refreeze_mass_pot) / self.dz # what the density of the boxes would be if the refreeze mass refroze\n\tporosity_pot\t\t\t= 1 - rho_pot / RHO_I\n\tporespace_vol_pot\t\t= porosity_pot * self.dz\n\tporespace_air_pot\t\t= porespace_vol_pot - self.LWC\n\n\tWmi \t\t\t\t\t= 0.057 * (RHO_I - rho_pot) / rho_pot + 0.017 # irreducible liquid water content per snow-plus-water mass, Langen et al. eqn 3 (unitless)\n\tSwi\t\t\t\t\t\t= Wmi / (1 - Wmi) * (rho_pot * RHO_I) / (1000 * (RHO_I - rho_pot)) \t#irreducible water saturation, volume of water per porespace volume (unitless), Colbeck 1972\n\n\tmaxpore \t\t\t\t= Swi * maxpore_f # upper limit on what fraction of the porosity can be filled with water.\n\n\tmaxLWC1\t\t\t\t\t= porespace_vol * maxpore \t# maximum volume of water that can be stored in each node (meters)\n\tmaxLWC2\t\t\t\t\t= ((917.0 * self.dz) - self.mass) / RHO_W_KGM # double check that the LWC does not get too large. \n\tmaxLWC \t\t\t\t\t= np.minimum(maxLWC1 , maxLWC2)\n\tmaxLWC[self.rho>impermeable_rho] = 0\n\tmaxLWC_mass \t\t\t= maxLWC * RHO_W_KGM\t\t# mass of the maximum volume of water\n\tmaxLWC1_pot\t\t\t\t= porespace_vol_pot * maxpore \t# maximum volume of water that can be stored in each node (meters)\n\tmaxLWC2_pot\t\t\t\t= ((917.0 * self.dz) - (self.mass + refreeze_mass_pot)) / RHO_W_KGM # double check that the LWC does not get too large. \n\tmaxLWC_pot \t\t\t\t= np.minimum(maxLWC1_pot , maxLWC2_pot)\n\t# maxLWC_pot[rho_pot>impermeable_rho] = 0\n\tmaxLWC_mass_pot \t\t= maxLWC_pot * RHO_W_KGM\t\t# mass of the maximum volume of water\n\n\tirreducible_mass_pot \t= Swi * porespace_vol_pot * RHO_W_KGM # mass of irreducible water for each volume (potential - does not separate how much is already there)\n\tirreducible_vol_pot\t\t= irreducible_mass_pot / RHO_W_KGM\n\tliquid_storage_vol_pot\t= irreducible_vol_pot - self.LWC\n\tliquid_storage_mass_pot = liquid_storage_vol_pot * RHO_W_KGM\n\n\textra_liquid_mass\t\t= np.sum(self.LWC[self.LWC > irreducible_vol_pot] * RHO_W_KGM - irreducible_mass_pot[self.LWC > irreducible_vol_pot])\n\tstorage_mass_pot\t\t= liquid_storage_mass_pot + refreeze_mass_pot #how much can be refrozen plus how much will stick around due to capillary forces\n\tstorage_mass_pot_sum\t= storage_mass_pot.cumsum(axis=0)\n\ttotal_liquid_mass \t\t= melt_mass_a + extra_liquid_mass\n\t\n\ttry:\n\t\tind_p \t= np.where(storage_mass_pot_sum >= total_liquid_mass)[0][0] # the layer that water will percolate to\n\texcept: # all of the liquid is runoff.\n\t\tind_p \t= 0\n\t###################################\n\n\t### if there is an impermeable layer, block water from getting through\n\tif np.any(self.rho[0:ind_p+1] >= impermeable_rho):\n\n\t\tind_p \t\t\t\t\t= np.where(self.rho >= impermeable_rho)[0][0] #- 1 # the index of the node that has density greater than the impermeable density\n\t\tid1 \t\t\t\t\t= np.where(self.LWC > irreducible_vol_pot)[0] # indices where the liquid water content is greater than the irreducible\n\t\tid2 \t\t\t\t\t= id1[id1 < ind_p] # of those, only the nodes above the impermeable layer\n\t\textra_liquid_mass \t\t= np.sum(self.LWC[id2] * RHO_W_KGM - irreducible_mass_pot[id2])\n\t\ttotal_liquid_mass \t\t= melt_mass_a + extra_liquid_mass\n\n\t\t### first, refreeze what the cold content above the lens allows\n\t\tself.mass[0:ind_p] \t\t= self.mass[0:ind_p] + refreeze_mass_pot[0:ind_p]\n\t\tself.rho[0:ind_p] \t\t= self.mass[0:ind_p] / self.dz[0:ind_p]\n\t\tself.Tz[0:ind_p] \t\t= T_MELT\n\t\tmass_frozen \t\t\t= np.sum(refreeze_mass_pot[0:ind_p])\n\t\tif mass_frozen >= total_liquid_mass:\n\t\t\ttotal_liquid_mass \t= 0\n\t\telse:\n\t\t\ttotal_liquid_mass \t= total_liquid_mass - mass_frozen\n\n\t\t### then, fill up the 
nodes above the ice slab\n\t\tmaxLWC_mass_pot_f \t\t= np.flipud(maxLWC_mass_pot[0:ind_p])\n\t\tmaxLWC_mass_pot_f_sum \t= maxLWC_mass_pot_f.cumsum(axis=0)\n\n\t\tif total_liquid_mass >= np.sum(maxLWC_mass_pot_f): # all porespace gets filled and there is runoff\t\t\n\t\t\tself.LWC[0:ind_p] \t\t= maxLWC_pot[0:ind_p] # each node gets the maximum allowed\n\t\t\t# stored_water_vol \t\t= np.sum(self.LWC[0:ind_p]) # can calculate how much runoff there is, need to consider how much LWC there was previously\n\t\t\n\t\telse: # fill up however much porespace is needed to accomodate the meltwater\n\n\t\t\tind_f \t\t\t\t\t= np.where(maxLWC_mass_pot_f_sum > total_liquid_mass)[0][0] #index on the flipped grid\n\t\t\tind_g \t\t\t\t\t= ind_p - 1 - ind_f #index on the real grid.\n\t\t\tself.LWC[ind_g+1:ind_p] = maxLWC_mass_pot[ind_g + 1:ind_p] / RHO_W_KGM # fill the indices up with the maximum allowed water\n\t\t\tlv_mass \t\t\t\t= total_liquid_mass - np.sum(maxLWC_mass_pot[ind_g + 1:ind_p]) \t# leftover volume\n\t\t\tself.LWC[ind_g] \t\t= lv_mass / RHO_W_KGM \t\t\t\t\t\t# put that into the ind_g node\n\t###################################\n\n\t\n\t### there is not an impermeable layer, water goes to layer ind_p\n\telif ind_p>0: \n\n\t\t### first, up to ind_p (not inclusive)\n\t\tself.mass[0:ind_p] \t\t= self.mass[0:ind_p] + refreeze_mass_pot[0:ind_p]\n\t\tself.rho[0:ind_p] \t\t= self.mass[0:ind_p] / self.dz[0:ind_p]\n\t\tlwc_old \t\t\t\t= np.copy(self.LWC)\n\t\tself.LWC[0:ind_p] \t\t= irreducible_mass_pot[0:ind_p] / RHO_W_KGM\n\t\tself.Tz[0:ind_p] \t\t= T_MELT\n\t\tlw_mass_retained \t\t= np.sum(refreeze_mass_pot[0:ind_p]) + np.sum(irreducible_mass_pot[0:ind_p]) - np.sum(lwc_old[0:ind_p] * RHO_W_KGM)\n\t\tlw_mass_remaining \t\t= total_liquid_mass - lw_mass_retained # mass left that will go into the ind_p node\n\n\t\t### now deal with the very last node where there may be just freezing or both freezing and some amount of retention\n\t\tif lw_mass_remaining <= refreeze_mass_pot[ind_p]: # all remaining water freezes\n\t\t\tlatent_heat_released \t= lw_mass_remaining * LF_I\n\t\t\tself.Tz[ind_p] \t\t\t= self.Tz[ind_p] + latent_heat_released / (CP_I * self.mass[ind_p])\n\t\t\tself.mass[ind_p] \t\t= self.mass[ind_p] + lw_mass_remaining\n\t\t\tself.rho[ind_p] \t\t= self.mass[ind_p] / self.dz[ind_p]\n\t\t\tself.LWC[ind_p] \t\t= 0\n\t\t\t\n\t\telse: \t# some refreeze, some sticks around \n\t\t\tself.mass[ind_p] \t\t= self.mass[ind_p] + refreeze_mass_pot[ind_p]\n\t\t\tself.rho[ind_p] \t\t= self.mass[ind_p] / self.dz[ind_p]\n\t\t\tself.LWC[ind_p] \t\t= (lw_mass_remaining - refreeze_mass_pot[ind_p]) / RHO_W_KGM\n\t\t\tself.Tz[ind_p] \t\t\t= T_MELT\n\t###################################\n\n\tself.LWC[self.LWC<0] = 0\n\n\treturn self.rho, self.age, self.dz, self.Tz, self.z, self.mass, self.dzn, self.LWC","sub_path":"firnmodel/CFM_main/melt.py","file_name":"melt.py","file_ext":"py","file_size_in_byte":18397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"111600751","text":"from os import listdir\nimport tensorflow as tf\nimport xml.etree.ElementTree as ET\nimport numpy as np\nfrom pickle import load\nimport utils.classes as classes\n\n\ndef weight_idx(tune, names):\n if tune.style in names:\n return names.index(tune.style)\n if tune.author in names:\n return names.index(tune.author)\n if \"ALL\" in names:\n return names.index(\"ALL\")\n return -1\n\n\ndef load_data(xml_directory, filters, batch_size):\n \n tunes, W = musicxml2tunes(xml_directory, filters)\n X = 
tunes2tensor(tunes)\n print(\"\\t{} tunes successfully loaded for training.\".format(len(tunes)))\n \n dataset = tf.data.Dataset.from_tensor_slices((X[:, :-1], X[:, 1:], W))\n dataset = dataset.shuffle(100).batch(batch_size)\n \n return dataset\n\n\ndef tunes2tensor(tunes):\n \n words_text2num = load(open(\"maps/words_text2num.txt\", 'rb'))\n \n print(\"\\nCREATING TENSORS FROM MUSICXML FILES...\")\n \n # Each tune has different length. Final tensor will have the max length of the whole data set\n max_len = max([len(tune) for tune in tunes])\n \n # Create and fill tensor (Batch x Sequence)\n all_tunes_int = []\n for tune in tunes:\n indexes = tune.index_form(words_text2num)\n # Pad with zeros\n while len(indexes) < max_len:\n indexes.append(0)\n all_tunes_int.append(indexes)\n return tf.convert_to_tensor(all_tunes_int, dtype=tf.int32)\n\n\ndef musicxml2tunes(xml_directory, filters):\n \"\"\" \n Function to go through the MusicXML files in xml_directory and convert them to Tune classes.\n Inputs:\n xml_directory: Name of the folder with the XML files\n filters:\n \"names\": Author or style to filter\n \"frac\": Corresponding desired fraction of each author/style\n Outputs:\n data: tensor with the dataset in one-hot form\n \"\"\"\n\n filters.setdefault('frac', None)\n filters.setdefault('names', None)\n\n frac = filters['frac']\n names = filters['names']\n\n print(\"\\nLOADING MUSICXML FILES...\\n\\tFilters: {}\\n\\tWeights: {}\".format(names, frac))\n\n # Validate that both frac and names are either None or lists\n if (not isinstance(frac, list) and frac is not None) or (not isinstance(names, list) and names is not None):\n raise Exception('Filters have to be in the form of lists')\n\n # If necessary, create names and frac list\n if names is None: # Apply trivial filter\n frac = [1.0]\n names = [\"ALL\"]\n elif frac is None: # Names were specified but frac didn't. 
def musicxml2tunes(xml_directory, filters):\n    \"\"\" \n    Function to go through the MusicXML files in xml_directory and convert them to Tune classes.\n    Inputs:\n        xml_directory: Name of the folder with the XML files\n        filters:\n            \"names\": Author or style to filter\n            \"frac\": Corresponding desired fraction of each author/style\n    Outputs:\n        tunes: list of Tune objects parsed from the XML files\n        tune_weights: per-tune weights for the loss function\n    \"\"\"\n\n    filters.setdefault('frac', None)\n    filters.setdefault('names', None)\n\n    frac = filters['frac']\n    names = filters['names']\n\n    print(\"\\nLOADING MUSICXML FILES...\\n\\tFilters: {}\\n\\tWeights: {}\".format(names, frac))\n\n    # Validate that both frac and names are either None or lists\n    if (not isinstance(frac, list) and frac is not None) or (not isinstance(names, list) and names is not None):\n        raise Exception('Filters have to be in the form of lists')\n\n    # If necessary, create names and frac list\n    if names is None:  # Apply trivial filter\n        frac = [1.0]\n        names = [\"ALL\"]\n    elif frac is None:  # Names were specified but frac wasn't. Apply the same frac to all\n        frac = [1.0/len(names) for _ in range(len(names))]\n    elif len(frac) != len(names):  # Validate that frac and names are the same length\n        raise Exception('Lists of filters and weights have to be the same size')\n\n    # If the sum of the specified fractions is less than one, apply trivial filter to the remaining fraction\n    if (1.0 - np.sum(frac)) >= 0.05:\n        names.append(\"ALL\")\n        frac.append(1.0-np.sum(frac))\n\n    # Define list for instances of each class\n    class_count = [0 for _ in range(len(names))]\n\n    # Read all tunes from the xml_directory and create a list of Tune classes\n    tunes = []\n    tune_classes = []\n    for file in listdir(xml_directory):\n        tree = ET.parse(xml_directory + file)\n        tune = classes.Progression(tree)\n\n        # Get index within the name list\n        idx = weight_idx(tune, names)\n        if idx == -1:  # Tune not to be considered\n            continue\n        else:\n            class_count[idx] += 12\n            for shift in range(12):\n                tunes.append(classes.Progression(tree, shift))\n                tune_classes.append(idx)\n            print(\"\\t\\tAdded: {}\".format(tunes[-1].title))\n\n    # Normalize count to compute class frequency\n    class_count = np.array(class_count) / np.sum(class_count)\n\n    # Get the weights for the loss function\n    tune_weights = [frac[i]/class_count[i] for i in tune_classes]\n    \n    return tunes, tune_weights\n","sub_path":"utils/data_tools.py","file_name":"data_tools.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
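# The tune_weights computed above are inverse-frequency weights: each tune is
# weighted by the desired fraction of its class divided by that class's
# observed frequency, so over- and under-represented styles balance out in the
# loss. A minimal sketch of the same rule with made-up counts:
import numpy as np

def inverse_frequency_weights(class_ids, desired_frac):
    counts = np.bincount(class_ids).astype(float)
    freq = counts / counts.sum()                 # observed class frequency
    return [desired_frac[c] / freq[c] for c in class_ids]

# two classes, desired 50/50, but class 0 appears three times as often:
print(inverse_frequency_weights([0, 0, 0, 1], [0.5, 0.5]))   # [0.67, 0.67, 0.67, 2.0]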
+{"seq_id":"628534858","text":"# -*- coding: utf-8 -*- \r\nimport web,os,sys,pymongo,re,base64,json,traceback,logging, logging.handlers,bson\r\nfrom bson.json_util import dumps\r\nfrom bson.objectid import ObjectId\r\nfrom pymongo import MongoClient\r\nfrom datetime import datetime\r\nfrom datetime import timedelta\r\nfrom random import randint\r\nimport config\r\n\r\nreload(sys)\r\nsys.setdefaultencoding(\"utf-8\")\r\n\r\nlogger = logging.getLogger(__name__)\r\nlogger_err = logging.getLogger('err_log')\r\nfh = logging.handlers.RotatingFileHandler('../log/front.log',maxBytes=10485760 ,backupCount=50)\r\nfh.setLevel(logging.DEBUG)\r\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\r\nfh.setFormatter(formatter)\r\nlogger.addHandler(fh)\r\n\r\n\r\nclient = MongoClient(config.db['path'])\r\ndb = client.tisi\r\n\r\n\r\napp_path = os.path.dirname(__file__)\r\n\r\nsys.path.append(app_path)\r\nif app_path:\r\n    os.chdir(app_path)\r\nelse:\r\n    app_path = os.getcwd()\r\n\r\nfile_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'file_upload'))\r\n    \r\n\r\nurls = (\r\n    '/thr','Thr',\r\n    '/tis','Tis',\r\n    '/s','Search',\r\n    '/d/(.*)','Detail',\r\n    '/doc_file/(.*)/(.*)', 'GetFile',\r\n    '/','Thr',\r\n\r\n\r\n    '/(.*)','Thr'\r\n)\r\n    \r\nweb.config.debug = True\r\nweb.config.session_parameters['cookie_path'] = '/'\r\n\r\n    \r\napp = web.application(urls, globals(), autoreload=True)  \r\nrender = web.template.render(os.path.abspath('templates'), base='base',cache=False,globals={'ctx':web.ctx,'http':web.http})\r\n\r\ndef list_file(id):\r\n    li = []\r\n    folder = file_path\r\n    if os.path.isdir(os.path.join(folder,id)):\r\n        for file in os.listdir(os.path.join(folder,id)):\r\n            li.append(file)\r\n    return li\r\n\r\nclass Thr:\r\n    def GET(self):\r\n        req = web.input(lang='th')\r\n        return render.search_thr(req.lang)\r\n\r\nclass GetFile:\r\n    def GET(self, doc_id, name):\r\n        filedir = os.path.join(file_path,doc_id)\r\n        try:\r\n            f = open(os.path.join(filedir,name), 'r')\r\n            return f.read()\r\n        except IOError:\r\n            return web.internalerror()\r\n\r\nclass Tis:\r\n    def GET(self):\r\n        req = web.input(lang='th')\r\n        return render.search_tis(req.lang)\r\n\r\nclass Detail:\r\n    def GET(self, id=0):\r\n        req = web.input(lang='th')\r\n        if id == 0:\r\n            return render.search_thr(req.lang, 102)  \r\n        \r\n        cur = db.collections.find_one({'_id':int(id)})\r\n        \r\n        if cur is None:\r\n            return render.search_thr(req.lang, 101)  \r\n        \r\n        if cur.get('ics') != []:\r\n            cur['ics'] = list(db.ics.find({'_id': {'$in': map(int,cur['ics'])}}))\r\n        \r\n        if cur.get('general_notes') != []:\r\n            cur['general_notes'] = list(db.general_notes.find({'_id': {'$in': map(int,cur['general_notes'])}}))\r\n        \r\n        if cur.get('issuing_body') != []:\r\n            cur['issuing_body'] = list(db.issuing_body.find({'_id': {'$in': map(int,cur['issuing_body'])}}))\r\n\r\n        li_f = list_file(id)\r\n        return render.detail(req.lang, cur, id, li_f)\r\n\r\nclass Search:\r\n    def search(self, req):\r\n        queryList = []\r\n        if req['title'] !='':\r\n            queryList.append({'$or': [\r\n                {'title_1':{\"$regex\":req['title']}},\r\n                {'title_2':{\"$regex\":req['title']}}\r\n            ]})\r\n            \r\n\r\n        if req.get('tis_no') is not None and req.get('tis_no') != '':\r\n            queryList.append(\r\n                {'prod_ref':{\"$regex\":req['tis_no']}}\r\n            )\r\n\r\n        if req['desc'] !='':\r\n            desc = req['desc'].split(',')\r\n            li = db.root_desc.find({'root_desc': {\"$in\": [re.compile(i, re.IGNORECASE) for i in desc]}})\r\n            root_desc = [str(d['_id']) for d in li if 'root_desc' in d]\r\n            \r\n            li = db.free_desc.find({'free_desc': {\"$in\": [re.compile(i, re.IGNORECASE) for i in desc]}})\r\n            free_desc = [str(d['_id']) for d in li if 'free_desc' in d]\r\n            queryList.append(\r\n                {'$or': [\r\n                    {'free_desc': {'$in': free_desc}},\r\n                    {'root_desc.root_desc': {'$in': root_desc}}\r\n                ]}\r\n            )\r\n        \r\n        if req['hs'] !='':\r\n            list_hs = [re.compile(i, re.IGNORECASE) for i in req['hs'].split(',')]\r\n            li = list(db.general_notes.find({'$or': [\r\n                    {'hs_tha': {'$in': list_hs}},\r\n                    {'hs_eng': {'$in': list_hs}}\r\n                ]})\r\n            )\r\n            hs = [str(d['_id']) for d in li]\r\n            queryList.append(\r\n                {'general_notes': {'$in': hs}}\r\n            )\r\n\r\n        if req['ics'] !='':\r\n            list_ics = [re.compile(i, re.IGNORECASE) for i in req['ics'].split(',')]\r\n            li = list(db.ics.find({'$or': [\r\n                    {'ics_tha': {'$in': list_ics}},\r\n                    {'ics_eng': {'$in': list_ics}}\r\n                ]})\r\n            )\r\n            ics = [str(d['_id']) for d in li]\r\n            queryList.append(\r\n                {'ics': {'$in': ics}}\r\n            )\r\n        \r\n        if req['doc_grp'] =='tis':\r\n            queryList.append(\r\n                {'doc_grp':'tis'}\r\n            )\r\n        elif req['doc_grp'] =='thr':\r\n            queryList.append({'$or': [\r\n                {'$and': [\r\n                    {'doc_grp':'tis'},\r\n                    {'doc_type':'T'}\r\n                ]},\r\n                {'doc_grp':'thr'}\r\n            ]})\r\n        \r\n        queryList.append({'$or': [\r\n            {'status':'checked'},\r\n            {'status':'upload'}\r\n        ]})\r\n        \r\n        res = {}\r\n        if len(queryList) == 0:\r\n            cur = db.collections.find()\r\n        else:\r\n            cur = db.collections.find(\r\n                {\r\n                    '$and': queryList\r\n                },\r\n                {'title_1':1, 'title_2':1, 'doc_grp':1, 'prod_ref':1}, limit=50).sort('_id', pymongo.ASCENDING)\r\n        res['count'] = cur.count()  \r\n        res['data'] = list(cur)\r\n        return res\r\n    \r\n    def GET(self):\r\n        req = web.input(lang='th',root_desc='',doc_grp = 'thr')\r\n        \r\n        res = self.search(req)\r\n        if res['count'] == 0:\r\n            if req['doc_grp'] == 'thr':\r\n                return render.search_thr(req.lang, 101)\r\n            elif req['doc_grp'] == 'tis':\r\n                return render.search_tis(req.lang, 101)\r\n        else:\r\n            return render.result(res,req,req.lang,req.get('doc_grp'))\r\n    \r\nif __name__ == \"__main__\":\r\n    web.httpserver.runsimple(app.wsgifunc(), (\"0.0.0.0\", 8081))\r\n","sub_path":"front/front.py","file_name":"front.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
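# Search.search above assembles per-field clauses and combines them with $and,
# using $regex for free-text fields. A minimal pymongo sketch of the same
# pattern; the connection URI is a placeholder and only two of the fields are
# shown (requires a running MongoDB to execute):
from pymongo import MongoClient

def build_query(title=None, doc_grp=None):
    clauses = []
    if title:
        # match the title against either title field
        clauses.append({'$or': [{'title_1': {'$regex': title}},
                                {'title_2': {'$regex': title}}]})
    if doc_grp:
        clauses.append({'doc_grp': doc_grp})
    return {'$and': clauses} if clauses else {}

db = MongoClient('mongodb://localhost:27017')['tisi']   # placeholder URI
print(db.collections.count_documents(build_query(title='safety', doc_grp='tis')))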
(\"0.0.0.0\", 8081))\r\n","sub_path":"front/front.py","file_name":"front.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"20285733","text":"def ReadFile(filename):\n names = [None] * 100\n genders = [None] * 100\n trip = [None] * 100\n\n with open(filename) as readfile:\n line = readfile.readline().rstrip(\"\\n\")\n x = 0\n while line:\n items = line.split(\",\")\n names[x] = items[0]\n genders[x] = items[1]\n trip[x] = items[2]\n line = readfile.readline().rstrip(\"\\n\")\n x+=1\n\n return names, genders, trip\n\n\ndef CountPupils(trip, tripname):\n counter = 0\n for x in range(len(trip)):\n if trip[x] == tripname:\n counter += 1\n return counter\n\n\ndef WriteFile(filename, data):\n with open(filename, \"w\") as writefile:\n writefile.write(str(data))\n print(\"data written to \" + filename)\n\nname, gender, trip = ReadFile(\"SchoolTrips.csv\")\n\nlon = CountPupils(trip, \"London\")\npar = CountPupils(trip, \"Paris\")\nbmx = CountPupils(trip, \"BMX\")\nmou = CountPupils(trip, \"Mountain Biking\")\ngor = CountPupils(trip, \"Gorge Walking\")\n\nWriteFile(\"LondonPupils.txt\", lon)\nWriteFile(\"ParisPupils.txt\", par)\nWriteFile(\"BMXPupils.txt\", bmx)\nWriteFile(\"MountainBikingPupils.txt\", mou)\nWriteFile(\"GorgeWalkingPupils.txt\", gor)\n\n","sub_path":"4.5/countpupils.py","file_name":"countpupils.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167611201","text":"import os\nimport cv2\nimport sys\nimport math\nimport codecs\nimport pickle\nimport skimage\nimport numpy as np\nimport config as cfg\nimport selectivesearch\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\ndef if_intersection(xmin_a, xmax_a, ymin_a, ymax_a, xmin_b, xmax_b, ymin_b, ymax_b):\n if_intersect = False\n if xmin_a < xmax_b <= xmax_a and (ymin_a < ymax_b <= ymax_a or ymin_a <= ymin_b < ymax_a):\n if_intersect = True\n elif xmin_a <= xmin_b < xmax_a and (ymin_a < ymax_b <= ymax_a or ymin_a <= ymin_b < ymax_a):\n if_intersect = True\n elif xmin_b < xmax_a <= xmax_b and (ymin_b < ymax_a <= ymax_b or ymin_b <= ymin_a < ymax_b):\n if_intersect = True\n elif xmin_b <= xmin_a < xmax_b and (ymin_b < ymax_a <= ymax_b or ymin_b <= ymin_a < ymax_b):\n if_intersect = True\n else:\n return if_intersect\n if if_intersect:\n x_sorted_list = sorted([xmin_a, xmax_a, xmin_b, xmax_b])\n y_sorted_list = sorted([ymin_a, ymax_a, ymin_b, ymax_b])\n x_intersect_w = x_sorted_list[2] - x_sorted_list[1]\n y_intersect_h = y_sorted_list[2] - y_sorted_list[1]\n area_inter = x_intersect_w * y_intersect_h\n return area_inter\n\ndef IOU(ver1, vertice2):\n '''\n 用于计算两个矩形框的IOU\n :param ver1: 第一个矩形框\n :param vertice2: 第二个矩形框\n :return: 两个矩形框的IOU值\n '''\n # TODO 另一种思路实现IOU计算\n '''\n 另一种思路:\n 1、拿到两个多边形的顶点坐标。\n 2、根据两组坐标,分别画出两组坐标的mask。\n 3、统计255的点,就是面积;两组mask的交集就是重合面积,简单算一下就知道面积了。\n '''\n vertice1 = [ver1[0], ver1[1], ver1[0]+ver1[2], ver1[1]+ver1[3]]\n area_inter = if_intersection(vertice1[0], vertice1[2], vertice1[1], vertice1[3], vertice2[0], vertice2[2], vertice2[1], vertice2[3])\n if area_inter:\n area_1 = ver1[2] * ver1[3]\n area_2 = vertice2[4] * vertice2[5]\n iou = float(area_inter) / (area_1 + area_2 - area_inter)\n return iou\n return False\n\ndef clip_pic(img, rect):\n '''\n\n :param img: 输入的图片\n :param rect: rect矩形框的4个参数\n :return: 输入的图片中相对应rect位置的部分 与 矩形框的一对对角点和长宽信息\n '''\n x, y, w, h = rect[0], rect[1], rect[2], 
\nclass Train_Alexnet_Data(object):\n    \"\"\"\n    A class that builds the training data. It mainly:\n    1. loads the raw training data;\n    2. resizes the images and one-hot encodes the labels;\n    3. saves them in pickle format;\n    4. provides a get_batch method.\n    \"\"\"\n    def __init__(self):\n        self.train_batch_size = cfg.T_batch_size\n        self.image_size = cfg.Image_size\n\n        self.train_list = cfg.Train_list\n        self.train_class_num = cfg.T_class_num\n        self.flower17_data =[]\n        self.data = cfg.DATA\n        # create the folder for storing the data\n        if not os.path.isdir(self.data):\n            os.makedirs(self.data)\n\n        self.epoch = 0\n        self.cursor = 0\n        self.load_17flowers()\n\n    def load_17flowers(self,save_name = '17flowers.pkl'):\n        # save path of the pkl file\n        save_path = os.path.join(self.data,save_name)\n\n\n        if os.path.isfile(save_path):\n            # the file exists\n            self.flower17_data = pickle.load(open(save_path,'rb'))\n        else:\n            # the file does not exist\n            with codecs.open(self.train_list,'r','utf-8') as f:\n                lines = f.readlines()\n                for num,line in enumerate(lines):\n                    context = line.strip().split(' ')\n                    image_path = context[0]\n                    index = int(context[1])\n\n                    img = cv2.imread(image_path)\n                    img = cv2.resize(img,(self.image_size,self.image_size))\n                    img = np.asarray(img,dtype=np.float32)\n\n                    label = np.zeros(self.train_class_num)\n                    label[index]=1\n                    self.flower17_data.append([img,label])\n                    # view_bar(\"Process train_image of %s\" % image_path, num + 1, len(lines))\n                pickle.dump(self.flower17_data,open(save_path,'wb'))\n\n    def get_batch(self):\n        '''\n        Get the data for one batch.\n        :return: \n        '''\n        images = np.zeros((self.train_batch_size,self.image_size,self.image_size,3))\n        labels = np.zeros((self.train_batch_size,self.train_class_num))\n        count = 0\n\n        while count < self.train_batch_size:\n            # fill the batch one sample at a time (structure mirrors get_Reg_batch below)\n            images[count] = self.flower17_data[self.cursor][0]\n            labels[count] = self.flower17_data[self.cursor][1]\n            count += 1\n            self.cursor += 1\n            if self.cursor >=len(self.flower17_data):\n                self.cursor=0\n                self.epoch+=1\n                np.random.shuffle(self.flower17_data)\n        print(self.epoch)\n        return images,labels\n\nclass FineTurn_And_Predict_Data(object):\n    \"\"\"\n    This class provides the data needed for the following three tasks:\n    1. Fine-tune: [image patch, one-hot label]\n    2. SVM: [image patch feature vector, image class]\n    3. Regression: [image patch feature vector, image ground_truth + label]\n    \"\"\"\n    def __init__(self,solver=None,is_svm=False,is_save=True):\n        self.solver = solver\n        self.is_svm = is_svm\n        self.is_save = is_save\n\n        self.fineturn_list = cfg.Finetune_list  # load the data list used for fine-tuning\n        self.image_size = cfg.Image_size\n        self.F_class_num = cfg.F_class_num   # number of classes\n        # number of regression targets; https://github.com/Liu-Yicheng/R-CNN/issues/1\n        # explains why there are 5 regression parameters\n        self.R_class_num = cfg.R_class_num\n\n        self.fineturn_batch_size = cfg.F_batch_size\n        self.Reg_batch_size = cfg.R_batch_size\n\n        self.fineturn_save_path = cfg.Fineturn_save   # save path\n        if not os.path.exists(self.fineturn_save_path):\n            os.makedirs(self.fineturn_save_path)\n\n        self.SVM_and_Reg_save_path = cfg.SVM_and_Reg_save  # save path\n        if not os.path.exists(self.SVM_and_Reg_save_path):\n            os.makedirs(self.SVM_and_Reg_save_path)\n\n        '''\n        Since fine-tuning, SVM and regression are all trained on image patches,\n        we have to decide whether a patch is foreground or background.\n        '''\n        self.fineturn_threshold = cfg.F_fineturn_threshold   # below the threshold it is background, otherwise assign by label\n        self.svm_threshold = cfg.F_svm_threshold  # below the threshold it is background, otherwise assign by label\n        self.reg_threshold = cfg.F_regression_threshold  # below the threshold it is background, otherwise foreground\n\n        self.SVM_data_dic = {}\n        self.Reg_data =[]\n        self.fineturn_data = []\n\n        self.cursor = 0\n        self.epoch = 0\n        print('lala')\n        if self.is_svm:\n            print('hehe')\n            if len(os.listdir(self.SVM_and_Reg_save_path))==0:\n                print('haha')\n                self.load_2flowers()\n        else:\n            # during fine-tuning this branch enters the function\n            if len(os.listdir(self.fineturn_save_path)) == 0:\n                self.load_2flowers()\n\n        self.load_from_npy()\n
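# The SVM / fine-tune / regression samples prepared below are all labelled by
# thresholding the patch's IOU against the ground truth: below the threshold a
# patch counts as background (class 0), otherwise it keeps the object label.
# A minimal sketch of that rule (the threshold value is illustrative):
def label_proposal(iou_val, object_label, threshold=0.3):
    # background proposals get class index 0, foreground keeps its label
    return 0 if iou_val < threshold else object_label

print(label_proposal(0.1, 2))   # background -> 0
print(label_proposal(0.6, 2))   # foreground -> 2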
\n    def load_2flowers(self):\n        '''\n        Load the data.\n        :return: \n        '''\n\n        '''\n        The advantage of codecs is that it handles encoding issues.\n        '''\n        with codecs.open(self.fineturn_list,'r','utf-8') as f:\n            lines = f.readlines()\n            # format of each line:\n            # 2flowers/jpg/0/image_0561.jpg 2 90,126,350,434\n            for num,line in enumerate(lines):\n                # for each image\n                labels = []\n                labels_bbox = []\n                images = []\n                context = line.strip().split(' ')\n\n                image_path = context[0]    # image path\n                ref_rect = context[2].split(',')\n                ground_truth = [int(i) for i in ref_rect]   # object location in the image\n                img = cv2.imread(image_path)\n                '''\n                There are two return values here: img_lbl and regions; we only use\n                the second one, so the first can be ignored.\n                regions is a dict whose keys include:\n                rect: the patch's [min_x, min_y, w, h] coordinates,\n                size: some mask_pixel/4-style value, in any case not the area,\n                labels: which fragments this patch is composed of.\n                So we generally only need regions' rect.\n                '''\n                img_lbl,regions = selectivesearch.selective_search(img,scale=500,sigma=0.9,min_size=10)\n                candidate = set()\n                for r in regions:\n                    '''\n                    Filter out some unwanted regions.\n                    Are these filter conditions really not redundant...\n                    '''\n                    if r['rect'] in candidate:\n                        continue\n                    if r['size'] <200:   # what exactly is this size? It is the size from the felzenszwalb segmentation\n                        continue\n                    if (r['rect'][2]*r['rect'][3])<500:  # actual area less than 500\n                        continue\n                    # crop the image\n                    proposal_img,proposal_vertice = clip_pic(img,r['rect'])\n\n                    if len(proposal_img) == 0:   # can the crop really come out empty??\n                        continue\n                    x,y,w,h = r['rect']\n                    if w==0 or h ==0:   # didn't we already check above that the product must exceed 500 to get here????\n                        continue\n                    [a,b,c] = np.shape(proposal_img)\n                    if a==0 or b==0 or c==0:   # ????\n                        continue\n\n                    # resize the patch and add it to candidate\n                    resized_proposal_img = cv2.resize(proposal_img,(self.image_size,self.image_size))\n                    candidate.add(r['rect'])\n                    # convert the patch to a numpy array\n                    img_float = np.asarray(resized_proposal_img,dtype='float32')\n\n                    '''\n                    Different stages train on different data.\n                    '''\n                    if self.is_svm:\n                        '''\n                        1. Get the patch's feature vector. This path assumes that when\n                           is_svm=True the solver is never None...\n                        2. Also, the two things appended to images are not the same kind:\n                           one is an array, the other a feature map.\n                        '''\n                        feature = self.solver.predict([img_float])\n                        images.append(feature[0])\n                    else:\n                        images.append(img_float)\n\n                    # compute the IOU between the ground truth and the proposal box\n                    iou_val = IOU(ground_truth,proposal_vertice)\n                    # compute the proposal box's center point, width and height\n                    px = float(proposal_vertice[0]) + float(proposal_vertice[4]/2.0)\n                    py = float(proposal_vertice[1]) + float(proposal_vertice[5]/2.0)\n                    ph = float(proposal_vertice[5])\n                    pw = float(proposal_vertice[4])\n                    # ground truth center point, width and height\n                    gx = float(ground_truth[0])\n                    gy = float(ground_truth[1])\n                    gw = float(ground_truth[2])\n                    gh = float(ground_truth[3])\n\n                    index = int(context[1])   # classification label\n                    if self.is_svm:\n                        # where the SVM data is prepared\n                        if iou_val= len(self.fineturn_data):\n            self.cursor = 0\n            self.epoch += 1\n            np.random.shuffle(self.fineturn_data)\n            print('epoch:',self.epoch)\n        return images,labels\n\n    def get_SVM_data(self,data_dir):\n        images = []\n        labels = []\n        for index in range(len(self.SVM_data_dic[data_dir])):\n            images.append(self.SVM_data_dic[data_dir][index][0])\n            labels.append(self.SVM_data_dic[data_dir][index][1])\n        return images,labels\n\n    def get_Reg_batch(self):\n        images = np.zeros((self.Reg_batch_size, 4096))\n        labels = np.zeros((self.Reg_batch_size, self.R_class_num))\n        count = 0\n        while (count < self.Reg_batch_size):\n            images[count] = self.Reg_data[self.cursor][0]\n            labels[count] = self.Reg_data[self.cursor][1]\n            count += 1\n            self.cursor += 1\n            if self.cursor >= len(self.Reg_data):\n                self.cursor = 0\n                self.epoch += 1\n                np.random.shuffle(self.Reg_data)\n        return images,labels","sub_path":"Day5_RCNN/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":17655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
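# load_2flowers above draws its candidate patches from selective search and
# discards tiny regions before clipping. A minimal sketch of that proposal
# loop; the size/area cut-offs mirror the ones used above, and 'image.jpg' is
# a placeholder path:
import cv2
import selectivesearch

img = cv2.imread('image.jpg')   # placeholder input image
_, regions = selectivesearch.selective_search(img, scale=500, sigma=0.9, min_size=10)
candidates = set()
for r in regions:
    x, y, w, h = r['rect']
    # skip duplicates, small segments, and boxes under 500 px^2
    if r['rect'] in candidates or r['size'] < 200 or w * h < 500:
        continue
    candidates.add(r['rect'])
print(len(candidates), 'proposals kept')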
+{"seq_id":"227741934","text":"import os\nfrom flask import Flask, request, make_response, render_template\n\nfrom slackbot import bot\nfrom concurrent.futures import ThreadPoolExecutor\n\npyBot = bot.Bot()\napp = Flask(__name__)\n\n# DOCS https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor\nexecutor = ThreadPoolExecutor(4)\n\n\ndef _event_handler(event_type, slack_event):\n    \"\"\"\n    A helper function that routes events from Slack to our Bot\n    by event type and subtype.\n    \"\"\"\n    team_id = slack_event[\"team_id\"]\n\n    # ================ File created/shared events =============== #\n    # A file is uploaded!\n    if event_type in [\"file_created\", \"file_shared\"]:\n        # pylint: disable=E1101\n        file_id = slack_event[\"event\"][\"file_id\"]\n        app.logger.info('Received \"%s\" event, file_id: %s, team_id: %s', event_type, file_id, team_id)\n        executor.submit(pyBot.lookup_car_from_file, team_id, file_id)\n        return make_response(\"File message received\", 202)\n\n    # ============= Event Type Not Found! ============= #\n    message = \"You have not added an event handler for the %s\" % event_type\n    return make_response(message, 200, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/install\", methods=[\"GET\"])\ndef pre_install():\n    \"\"\"This route renders the installation page with 'Add to Slack' button.\"\"\"\n    # Since we've set the client ID and scope on our Bot object, we can change\n    # them more easily while we're developing our app.\n    client_id = pyBot.oauth[\"client_id\"]\n    scope = pyBot.oauth[\"scope\"]\n    # Our template is using the Jinja templating language to dynamically pass\n    # our client id and scope\n    return render_template(\"install.html\", client_id=client_id, scope=scope)\n\n\n@app.route(\"/thanks\", methods=[\"GET\", \"POST\"])\ndef thanks():\n    \"\"\"\n    This route is called by Slack after the user installs our app. It will\n    exchange the temporary authorization code Slack sends for an OAuth token\n    which we'll save on the bot object to use later.\n\n    To let the user know what's happened it will also render a thank you page.\n    \"\"\"\n    code_arg = request.args.get('code')\n    # The bot's auth method handles exchanging the code for an OAuth token\n    pyBot.auth(code_arg)\n    return render_template(\"thanks.html\")\n\n\n@app.route(\"/listening\", methods=[\"GET\", \"POST\"])\ndef hears():\n    \"\"\"\n    This route listens for incoming events from Slack and uses the event\n    handler helper function to route events to our Bot.\n    \"\"\"\n    slack_event = request.get_json()\n\n    # ============= Slack URL Verification ============ #\n    # In order to verify the url of our endpoint, Slack will send a challenge\n    # token in a request and check for this token in the response our endpoint\n    # sends back.\n    # For more info: https://api.slack.com/events/url_verification\n    if \"challenge\" in slack_event:\n        return make_response(slack_event[\"challenge\"], 200,\n                             {\"content_type\": \"application/json\"})\n\n    # ============ Slack Token Verification =========== #\n    # We can verify the request is coming from Slack by checking that the\n    # verification token in the request matches our app's settings\n    if pyBot.verification != slack_event.get(\"token\"):\n        message = \"Invalid Slack verification token: %s \\npyBot has: \\\n                  %s\\n\\n\" % (slack_event[\"token\"], pyBot.verification)\n        # By adding \"X-Slack-No-Retry\" : 1 to our response headers, we turn off\n        # Slack's automatic retries during development.\n        return make_response(message, 403, {\"X-Slack-No-Retry\": 1})\n\n    # ====== Process Incoming Events from Slack ======= #\n    # If the incoming request is an Event we've subscribed to\n    if \"event\" in slack_event:\n        event_type = 
slack_event[\"event\"][\"type\"]\n # Then handle the event by event_type and have your bot respond\n return _event_handler(event_type, slack_event)\n\n # If our bot hears things that are not events we've subscribed to,\n # send a quirky but helpful error response\n return make_response(\"[NO EVENT IN SLACK REQUEST] These are not the droids\\\n you're looking for.\", 404, {\"X-Slack-No-Retry\": 1})\n\n\n@app.route(\"/kenteken\", methods=[\"POST\"])\n@app.route(\"/my_car\", methods=[\"POST\"])\ndef slack_commands():\n # pylint: disable=E1101\n\n form_dict = request.form\n\n command = form_dict['command']\n user_id = form_dict['user_id'] # Slack will render the Display name\n text = form_dict['text']\n\n if command == '/my_car':\n return make_response(pyBot.command_my_car(user_id, text))\n\n if command == '/kenteken':\n return make_response(pyBot.command_kenteken(text))\n\n app.logger.warning('Incoming slack command is not implemented: %s', command)\n return make_response(\"Unknown command...\")\n\n\n@app.route(\"/test\", methods=[\"GET\"])\ndef testing():\n if not app.debug:\n return make_response(\"Only available when DEBUG is enabled\", 405)\n\n params = request.args\n\n if 'file' in params.keys():\n file_path = os.path.join('/data', params.get('file'))\n result = list(pyBot.licenceplateExtractor.find_licenceplates(file_path))\n elif 'kenteken' in params.keys():\n result = pyBot.get_kenteken_details(params.get('kenteken'))\n else:\n return make_response('Usage examples:
<br>'\n                             '<br>/test?file=IMG_3423.JPG<br>
'\n '/test?kenteken=12AB34')\n return make_response(str(result))\n\n\nif __name__ == '__main__':\n is_debug_mode = os.environ.get('DEBUG', 'False').lower() in ['true', 'yes']\n app.run(debug=is_debug_mode, host='0.0.0.0')\n","sub_path":"slackbot/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"215894935","text":"\"\"\"Queries the Path of Exile API for json data which is parsed and displays\r\n relevent ladder information for a specific character. LINEAR SEARCH\"\"\"\r\nimport urllib.parse\r\nimport time\r\nimport requests\r\n\r\n# Enter league here (case sensitive)\r\nCURRENT_LEAGUE = \"LEAGUEHERE\"\r\n# Enter character name (case sensitive)\r\nCHARACTER_NAME = \"NAMEHERE\"\r\n# For class rank (case sensitive)\r\nCHARACTER_CLASS = \"CLASSHERE\"\r\n\r\ndef parse_url(offset, limit):\r\n \"\"\"Converts passed league name into a URL the browser understands\"\"\"\r\n parsed_url = \"http://api.pathofexile.com/ladders/\" + \\\r\n urllib.parse.quote(CURRENT_LEAGUE) + '?' + \\\r\n urllib.parse.urlencode({'offset': offset, 'limit': limit})\r\n return parsed_url\r\n\r\ndef find_character_initial(data):\r\n \"\"\"Finds the initial character rank. Returns dictionary of character data\"\"\"\r\n i = 0\r\n class_counter = 1\r\n death_counter = 0\r\n offset = 0\r\n limit = 200\r\n character_found = False\r\n\r\n # 15000 is last rank recorded\r\n while offset < 15000:\r\n # Increments the passed URL with the new offset\r\n parse_url(offset, limit)\r\n\r\n # While i hasn't iterated through all values on a page\r\n while i < limit - 1:\r\n # If the json data matches the character name\r\n if data[\"entries\"][i][\"character\"][\"name\"] == CHARACTER_NAME:\r\n character_found = True\r\n break\r\n elif data[\"entries\"][i][\"character\"][\"class\"] == CHARACTER_CLASS:\r\n # Increments the class rank counter\r\n class_counter += 1\r\n\r\n elif data[\"entries\"][i][\"dead\"] is True:\r\n # Increments the 'Rank Alive' counter\r\n death_counter += 1\r\n # Else if all entries have been scanned\r\n elif offset == 15000:\r\n character_found = False\r\n # Return false so main() can print an error\r\n return False\r\n # Increment i to scan the next value\r\n i += 1\r\n\r\n if character_found:\r\n # Creates a dictionary of the useful player information\r\n character_information = \\\r\n {\r\n \"name\": CHARACTER_NAME,\r\n \"class\": data[\"entries\"][i][\"character\"][\"class\"],\r\n \"experience\": data[\"entries\"][i][\"character\"][\"experience\"],\r\n \"rank\": data[\"entries\"][i][\"rank\"],\r\n \"class_rank\": class_counter,\r\n \"rank_alive\": death_counter\r\n }\r\n return character_information\r\n # Once all entries on a page are scanned, increase the offset by offset + 200\r\n offset += limit\r\n # Resets i to 0 so it can scan the next page again\r\n i = 0\r\n # Requests the next url e.g. 
offset is now 200 and limit is 200 -\r\n # http://api.pathofexile.com/ladders/Hardcore%20Legacy?offset=200&limit=200\r\n data = requests.get(parse_url(offset, limit)).json()\r\n\r\ndef print_data(information):\r\n \"\"\"Prints character information\"\"\"\r\n print(\"\\nName: \\t\\t{}\" .format(information[\"name\"]))\r\n print(\"Rank Overall: \\t{}\" .format(information[\"rank\"]))\r\n print(\"Rank Alive: \\t{}\" .format(information[\"rank_alive\"]))\r\n print(\"Class: \\t{}\" .format(information[\"class\"]))\r\n print(\"Ascendancy Rank:\\t{}\\t\" .format(information[\"class_rank\"]))\r\n print(\"Experience: \\t{:,}\" .format(information[\"experience\"]))\r\n\r\ndef main():\r\n # Get the URL of the ladder API\r\n url = parse_url(0, 200)\r\n initial_search = True\r\n # Parse the URL\r\n json_data = requests.get(url).json()\r\n print(\"\\nSearching for character...\")\r\n print(\"(may take a minute if you are low ranked)\")\r\n # Passes the json data to the initial search\r\n character_data = find_character_initial(json_data)\r\n if character_data is False:\r\n print(\"\\nERROR. CHARACTER NOT FOUND. EITHER API IS DOWN OR YOU ARE A >15000 SHITTER\\n\")\r\n print(\"Try again later or git gud.\")\r\n return\r\n print_data(character_data)\r\n while True:\r\n # Pass the data to the subsequent searches\r\n updated_data = find_character_initial(json_data)\r\n # Clear screen above\r\n print(\"\\n\" * 100)\r\n print_data(updated_data)\r\n time.sleep(15)\r\nmain()\r\n","sub_path":"LadderTrack.py","file_name":"LadderTrack.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"421782974","text":"import pandas as pd\nimport os\nimport matplotlib.pyplot as plt\n\npath_csv = os.getcwd() + '/project/henk_metrics_logger.csv'\ndf = pd.read_csv(path_csv)\n\n# Initiate figure\nfig, ax1 = plt.subplots()\n\n# Left side\ncolor = 'tab:red'\nax1.set_xlabel('Epoch')\nax1.set_ylabel('AUROC', color=color)\nax1.plot(df['Epoch'], df['val auc'], color=color)\nax1.plot(df[\"Epoch\"], df['train auc'], color=color, linestyle='dashed')\nax1.tick_params(axis='y', labelcolor=color)\n# plt.legend(['Validation AUC', 'Training AUC'], loc=\"center\")\n\n# Right side\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\ncolor = 'tab:blue'\nax2.set_ylabel('Loss', color=color) # we already handled the x-label with ax1\nax2.plot(df['Epoch'], df['val_loss'], color=color)\nax2.plot(df[\"Epoch\"], df['train_loss'], color=color, linestyle='dashed')\nax2.tick_params(axis='y', labelcolor=color)\n# plt.legend(['Validation loss', 'Training loss'], loc=\"center right\")\n\n\nleg = fig.legend(['Validation AUROC', 'Training AUROC', 'Validation loss', 'Training loss'], loc='center')\nif leg:\n leg.draggable()\n\n# fig.tight_layout() # otherwise the right y-label is slightly clipped\n\nplt.savefig(os.getcwd() + '/plotcode/' + 'i3dautoscore_Aucloss.pdf')\n\n","sub_path":"plotcode/i3d_rocloss.py","file_name":"i3d_rocloss.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"443172863","text":"# encoding: utf8\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('profile', '0002_neo_index'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='provider_type',\n field=models.PositiveSmallIntegerField(choices=[(1, 'Twitter'), (2, 'Reddit')]),\n ),\n 
]\n","sub_path":"src/aggregates/profile/migrations/0003_auto_20140522_2004.py","file_name":"0003_auto_20140522_2004.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"129677030","text":"import os\nimport sys\nimport random\nimport pyautogui\nfrom pywinauto.findwindows import (find_window,\n WindowNotFoundError)\nfrom pywinauto.win32functions import SetForegroundWindow\nimport subprocess\nimport time\n\n\nclass FileCrawler(object):\n\n \"\"\"\n Abstract base class for a file crawler.\n Defines the crawling class, is customized by\n overriding the is_desired() and touch() functions.\n \"\"\"\n\n def __init__(self, path, debug=False):\n self.debug = debug\n self.path = path\n self.chunk_size = 2**27\n\n def is_desired(self, file):\n pass\n\n def touch(self, path):\n pass\n\n def run(self):\n for parent, dirs, files in os.walk(self.path):\n if self.debug:\n print(\"Crawling through\", parent)\n for file in files:\n if self.debug:\n print(\"Inspecting\", file)\n if self.is_desired(file):\n print(\"Desire\", os.path.join(parent, file))\n self.touch(os.path.join(parent, file))\n\n\nclass OfficeCrawler(FileCrawler):\n\n \"\"\"\n File crawler specifically for MS Office files.\n \"\"\"\n\n WORD_EXTENSIONS = ['.doc', '.docx', '.docm', '.docb']\n EXCEL_EXTENSIONS = ['.xls', '.xlsx', 'xlsm', '.xlsb']\n OFFICE_EXTENSIONS = WORD_EXTENSIONS + EXCEL_EXTENSIONS\n\n WORD_EXE = r\"C:\\Program Files\\Microsoft Office\\Office14\\WINWORD.EXE\"\n EXCEL_EXE = r\"C:\\Program Files\\Microsoft Office\\Office14\\EXCEL.EXE\"\n\n def is_desired(self, file):\n \"\"\"\n A file is desired if it ends with an office extension.\n\n Args:\n file: The name of the file in question (ex: my_doc.docx)\n\n Returns:\n True if the file is desired, False if it is not\n \"\"\"\n\n name, ext = os.path.splitext(file)\n return not name.startswith('~') and ext.lower() in \\\n self.OFFICE_EXTENSIONS\n\n def lookAtWindow(self, title_re):\n \"\"\"\n Brings a window that matches title_re to the foreground.\n\n Args:\n title_re: A regular expression that matches the desired window.\n \"\"\"\n try:\n SetForegroundWindow(find_window(title_re=title_re))\n except WindowNotFoundError as e:\n print(e)\n\n def typeAndSave(self, msg):\n \"\"\"\n A matching keystroke is sent for each character in msg.\n Afterwards, ctrl+s is sent.\n \"\"\"\n keys = list(msg) + ['enter']\n pyautogui.typewrite(\n keys,\n interval=0.05)\n pyautogui.hotkey('ctrl', 's')\n\n time.sleep(1)\n\n def openProcess(self, path):\n name, ext = os.path.splitext(path)\n\n if ext.lower() in self.WORD_EXTENSIONS:\n return subprocess.Popen(\n [self.WORD_EXE, path]\n )\n\n elif ext.lower() in self.EXCEL_EXTENSIONS:\n return subprocess.Popen(\n [self.EXCEL_EXE, path]\n )\n\n def touch(self, path):\n \"\"\"\n Opens the file using the appropriate program (Excel or Word)\n and then randomly chooses to write and save to it or not.\n \"\"\"\n if self.debug:\n print(\"Touching\", path)\n\n dir_path, file_name = os.path.split(path)\n window_name_re = r'.*' + file_name + r'.*'\n\n process = self.openProcess(path)\n\n time.sleep(1)\n\n self.lookAtWindow(window_name_re)\n\n if self.debug:\n print(\"Read\", path)\n\n if random.random() < 0.5:\n time.sleep(0.5)\n self.typeAndSave('I am writing in word I did it father!')\n time.sleep(1)\n if self.debug:\n print(\"Wrote into\", path)\n process.terminate()\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n path_to_crawl = os.getcwd()\n else:\n 
path_to_crawl = sys.argv[1]\n\n crawler = OfficeCrawler(path_to_crawl, debug=True)\n crawler.run()\n","sub_path":"office_crawler.py","file_name":"office_crawler.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"55345752","text":"from astropy.io import fits\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport mock_light_curves as mlc\nimport confusion_matrix as cf\nimport os\nimport scipy.signal\nimport time\nimport matplotlib.backends.backend_pdf as pdfs\n\nclass Light_Curve():\n kernel = 99\n alpha = 0.1\n beta = 0.25\n centroid_threshold = 0.5\n \n def __init__(self, tess_bjds, pdcsap_fluxes, centroid, templates):\n self.tess_bjds = tess_bjds\n self.pdcsap_fluxes = pdcsap_fluxes\n self.centroid = centroid\n \n self.flat_lcur = []\n self.valid_times = []\n self.valid_fluxes = []\n self.valid_centroid = []\n self.flat_centroid = []\n \n self.start_edge = 0\n self.end_edge = 0\n self.left_edge = 0\n self.right_edge = 0\n \n self.templates = templates\n self.result = False\n self.correlations = []\n self.best_correlation = 0\n self.best_template = self.templates[0]\n self.window = 0\n \n self.flag = \"Normal\"\n self.detection_indices = []\n self.real_detections = []\n self.ambiguous_real = []\n self.flare_detections = []\n self.ambiguous_flare = []\n self.centroid_detections = []\n \n \n def get_valid_data(self):\n notnan_times = []\n notnan_fluxes = []\n notnan_centroid = []\n \n for i in range(len(self.pdcsap_fluxes)):\n flux = self.pdcsap_fluxes[i]\n if not np.isnan(flux):\n notnan_times.append(self.tess_bjds[i])\n notnan_fluxes.append(self.pdcsap_fluxes[i])\n notnan_centroid.append(self.centroid[i])\n \n return notnan_times, notnan_fluxes, notnan_centroid\n \n def sigma_clip(self, flat_lcur_unclipped, notnan_times, notnan_fluxes, notnan_centroid):\n sigma_clip = 10*np.std(flat_lcur_unclipped)\n length = len(flat_lcur_unclipped)\n flat_lcur = []\n valid_times = []\n valid_fluxes = []\n valid_centroid = []\n for i in range(length):\n flux = flat_lcur_unclipped[i]\n if abs(flux) > sigma_clip:\n if i == 0:\n if flat_lcur_unclipped[i+1] > sigma_clip:\n flat_lcur.append(flux)\n valid_times.append(notnan_times[i])\n valid_fluxes.append(notnan_fluxes[i])\n valid_centroid.append(notnan_centroid[i])\n elif i == length-1:\n if flat_lcur_unclipped[i-1] > sigma_clip:\n flat_lcur.append(flux)\n valid_times.append(notnan_times[i])\n valid_fluxes.append(notnan_fluxes[i])\n valid_centroid.append(notnan_centroid[i])\n else:\n if flat_lcur_unclipped[i+1] > sigma_clip or flat_lcur_unclipped[i-1] > sigma_clip:\n flat_lcur.append(flux)\n valid_times.append(notnan_times[i])\n valid_fluxes.append(notnan_fluxes[i])\n valid_centroid.append(notnan_centroid[i])\n else:\n flat_lcur.append(flux)\n valid_times.append(notnan_times[i])\n valid_fluxes.append(notnan_fluxes[i])\n valid_centroid.append(notnan_centroid[i])\n \n return flat_lcur, valid_times, valid_fluxes, valid_centroid\n\n def get_edges(self, notnan_times):\n left_edge = None\n right_edge = None\n for k in range(len(notnan_times)):\n if notnan_times[k] - notnan_times[k-1] > 1:\n left_edge = notnan_times[k-1]\n right_edge = notnan_times[k]\n return left_edge, right_edge\n \n def match_filter(self):\n initial = True\n prev_positive_corr = None\n prev_result = False\n \n # perform cross-correlation for all template widths\n for template in self.templates: \n correlations = scipy.signal.correlate(self.flat_lcur, template, mode='valid')\n 
highest_corr = max(correlations)\n result = highest_corr > Light_Curve.alpha\n \n # choose best correlation result so far \n if initial or highest_corr > self.best_correlation:\n self.best_correlation = highest_corr\n self.result = result\n self.correlations = correlations\n self.best_template = template\n initial = False\n \n #break if new template has lower correlation than previous template\n if prev_result:\n if highest_corr < prev_positive_corr:\n break\n if result:\n prev_positive_corr = highest_corr\n \n prev_result = result\n \n def get_detection_indices(self):\n detection_indices = []\n prev_detection = False\n best_detection_info = None\n need_to_add = False\n corr_length = len(self.correlations)\n \n #find locations of positive detections\n for j in range(corr_length):\n if self.correlations[j] > Light_Curve.alpha:\n if not prev_detection:\n need_to_add = True\n best_detection_info = (j, self.correlations[j])\n else:\n if self.correlations[j] > best_detection_info[1]:\n best_detection_info = (j, self.correlations[j])\n prev_detection = True\n else:\n if need_to_add:\n detection_indices.append(best_detection_info[0])\n need_to_add = False\n prev_detection = False\n \n return detection_indices\n \n def classify_detections(self):\n nonedge_detections = []\n # find locations of non-edge detections\n for detection in self.detection_indices:\n t = self.valid_times[detection]\n if not self.is_edge_detection(t, self.start_edge, self.end_edge, self.left_edge, self.right_edge):\n nonedge_detections.append(detection)\n \n # all detections are edge detections so we flag as 'edge'\n if len(nonedge_detections) == 0:\n self.flag = 'edge'\n \n min_corr = min(self.correlations)\n # flag as transit if there is a stronger negative correlation than a positive one\n if self.flag == 'SL':\n if abs(min_corr) > 1.5 * self.best_correlation:\n self.flag = 'transit'\n \n # check centroid of detection against template to see if this is a valid detection\n candidate_detections = []\n if self.flag == 'SL':\n for detection in nonedge_detections:\n centroid_sample = mlc.offset_and_normalize(self.flat_centroid[detection:detection+self.window])\n centroid_template = mlc.offset_and_normalize(mlc.generate_centroid_template(1, self.window//3))\n centroid_template2 = np.flip(centroid_template)\n #detection_sample = mlc.offset_and_normalize(flat_lcur[detection:detection+window])\n centroid_corr = scipy.signal.correlate(centroid_sample, centroid_template, mode='valid')[0]\n centroid_corr2 = scipy.signal.correlate(centroid_sample, centroid_template2, mode='valid')[0]\n if centroid_corr >= Light_Curve.centroid_threshold or centroid_corr2 >= Light_Curve.centroid_threshold:\n self.centroid_detections.append((detection, self.best_template, None, None))\n else:\n candidate_detections.append(detection)\n # flag as centroid if detections are centroid detections\n if len(candidate_detections) == 0:\n self.flag = 'centroid'\n \n # find locations of all real and flare detections\n ambiguity = .05\n \n for detection in candidate_detections:\n time_window = self.valid_times[detection:detection+self.window]\n detection_sample = mlc.offset_and_normalize(self.flat_lcur[detection:detection+self.window])\n gaussian_template = mlc.offset_and_normalize(mlc.generate_template(1, self.window//3, gaps=True, detection_time=self.valid_times[detection+self.window//2], times=time_window))\n #flare_template = mlc.generate_flare_template(1, window//3, gaps=True, detection_time=valid_times[detection+window//2], times=time_window)\n gaussian_result 
= scipy.signal.correlate(detection_sample, self.best_template, mode='valid')[0]\n flare_result = 0\n best_flare_template = None\n best_flare_time_window = None\n for i in range(-5, 6):\n flare_time_window = self.valid_times[detection+i:detection+self.window+i]\n flare_template = mlc.offset_and_normalize(mlc.generate_flare_template(1, self.window//3, gaps=True, detection_time=self.valid_times[detection+self.window//2+i], times=flare_time_window))\n flare_window = mlc.offset_and_normalize(self.flat_lcur[detection+i:detection+self.window+i])\n flare_correlation = scipy.signal.correlate(flare_window, flare_template, mode='valid')[0]\n if flare_correlation > flare_result:\n flare_result = flare_correlation\n best_flare_template = flare_template\n best_flare_time_window = flare_time_window\n # compare correlation to gaussian and flare templates\n if flare_result > gaussian_result:\n # check for ambiguity between the gaussian and flare results\n if abs(flare_result - gaussian_result) < ambiguity:\n self.ambiguous_flare.append((detection, gaussian_template, best_flare_template, best_flare_time_window))\n else:\n self.flare_detections.append((detection, gaussian_template, best_flare_template, best_flare_time_window))\n else:\n if abs(gaussian_result - flare_result) < ambiguity:\n self.ambiguous_real.append((detection, gaussian_template, best_flare_template, best_flare_time_window))\n else:\n self.real_detections.append((detection, gaussian_template, best_flare_template, best_flare_time_window))\n \n # all detections are not SL detections so we flag as 'flare', 'ambiguousFlare', 'ambiguousSL' \n if self.flag == 'SL' and len(self.real_detections) == 0:\n if len(self.ambiguous_real) > 0:\n self.flag = 'ambiguousSL'\n elif len(self.ambiguous_flare) > 0:\n self.flag = 'ambiguousFlare'\n else:\n self.flag = 'flare'\n \n # check if the detections exceed the higher threshold\n if self.flag == 'SL':\n for detection in self.real_detections:\n detection_corr = self.correlations[detection[0]]\n if detection_corr > Light_Curve.beta:\n self.flag = 'highSL'\n break\n \n def is_edge_detection(self, t, start, end, left, right, cutoff=.5):\n return any([t <= start + cutoff, \n t >= end - cutoff, \n (t >= left - cutoff) and (t <= left),\n (t <= right + cutoff) and (t >= right)])\n \n def run_pipeline(self):\n # Take only valid data points \n notnan_times, notnan_fluxes, notnan_centroid = self.get_valid_data()\n \n # determine left and right edges for orbital gap\n self.left_edge, self.right_edge = self.get_edges(notnan_times)\n \n # subtract median filter to get high frequency noise for light curve\n flat_lcur_unclipped = mlc.offset_and_normalize(notnan_fluxes - scipy.signal.medfilt(notnan_fluxes, Light_Curve.kernel))\n \n # remove lone outliers to avoid fitting to outliers\n self.flat_lcur, self.valid_times, self.valid_fluxes, self.valid_centroid = self.sigma_clip(flat_lcur_unclipped, notnan_times, notnan_fluxes, notnan_centroid)\n \n # subtract median filder to get high frequency noise for centroid\n self.flat_centroid = mlc.offset_and_normalize(self.valid_centroid - scipy.signal.medfilt(self.valid_centroid, Light_Curve.kernel))\n \n # run match filter on the flat light curve with all templates\n self.match_filter()\n \n if self.result:\n self.window = len(self.best_template)\n self.start_edge = self.valid_times[0]\n self.end_edge = self.valid_times[len(self.correlations)-1]\n self.flag = 'SL'\n \n self.detection_indices = self.get_detection_indices()\n self.classify_detections()\n \n \n \ndef 
mf_pipeline(directory, result_foldername, mock=False, num_simulations=None):\n '''\n Pipeline runs a match filter on light curves and finds if the light curve \n matches a predetermined template.\n \n directory: string, directory where light curve .fits files are located\n result_foldername: string, location of resulting plots, data \n mock: bool, true if using mock data, False if using real light curve\n num_simulations: int, number of mock light curves to generate (if mock)\n \n returns: dict, results for each light curve (file)\n '''\n kernel = 99\n # generate templates varying width from 30mins to 2hrs (15 bins to 60 bins)\n templates = []\n widths = [5*j + 15 for j in range(10)] + [10*j + 70 for j in range(4)]\n for width in widths: \n templates.append(mlc.offset_and_normalize(mlc.generate_template(1, width)))\n \n if mock:\n num_bins = 10\n counter = 0\n \n # create folder for resulting plots\n if not os.path.exists(result_foldername):\n os.mkdir(result_foldername)\n \n # results array for completeness analysis of each variable\n i_actual = [[] for _ in range(num_bins)]\n i_predicted = [[] for _ in range(num_bins)]\n inclinations = [[] for _ in range(num_bins)]\n \n P_actual = [[] for _ in range(num_bins)]\n P_predicted = [[] for _ in range(num_bins)]\n periods = [[] for _ in range(num_bins)]\n \n mbh_actual = [[] for _ in range(num_bins)]\n mbh_predicted = [[] for _ in range(num_bins)]\n mbhs = [[] for _ in range(num_bins)]\n \n ms_actual = [[] for _ in range(num_bins)]\n ms_predicted = [[] for _ in range(num_bins)]\n mss = [[] for _ in range(num_bins)]\n \n noise_actual = [[] for _ in range(num_bins)]\n noise_predicted = [[] for _ in range(num_bins)]\n noises = [[] for _ in range(num_bins)]\n \n threshold_actual = [[] for _ in range(num_bins)]\n threshold_predicted = [[] for _ in range(num_bins)]\n thresholds = [[] for _ in range(num_bins)]\n \n total_actual = []\n total_predicted = []\n \n # generate bins\n cosi_bins = [j/num_bins * .01 for j in range(num_bins)]\n P_bins = [np.e**(j*(np.log(27)-np.log(1))/num_bins) for j in range(num_bins)]\n mbh_bins = [15*j/num_bins + 5 for j in range(num_bins)]\n ms_bins = [j/num_bins + 0.5 for j in range(num_bins)]\n noise_bins = np.array([0,50,100,150,250,500,750,1000,1500,2000]) \n altered_noise_bins = noise_bins * 10**(-6) + 10**(-9)\n threshold_bins = np.array([.01*i for i in range(10)]) \n \n start = time.time()\n for z in range(1, num_simulations+1):\n \n # randomly generate relevant parameters from known priors\n P = mlc.P_rng()\n M_BH = mlc.mbh_rng()\n M_S = mlc.ms_rng()\n i = mlc.i_rng()\n cosi = math.cos(i)\n noise = np.random.choice(altered_noise_bins)\n threshold = np.random.random() * .1\n \n # generate positive/negative signal at random\n pos_signal = np.random.choice([True, False])\n if pos_signal:\n lcur, EV, Beam, SL = mlc.generate_light_curve(P, i, M_BH, M_S=M_S, std=noise)\n else:\n lcur = mlc.generate_flat_signal(noise)\n \n # subtract median filter from signal and normalize for correlation analysis\n flat_lcur = lcur - scipy.signal.medfilt(lcur, kernel)\n flat_lcur = mlc.offset_and_normalize(flat_lcur)\n \n initial = True\n best_result = None\n best_corr = 0\n best_correlations = None\n best_template = None\n \n # perform cross-correlation for all template widths\n for template in templates: \n correlations = scipy.signal.correlate(flat_lcur, template)\n highest_corr = max(correlations)\n result = highest_corr > threshold\n \n # choose best correlation result so far (break on positive signal detection)\n if initial or 
highest_corr > best_corr:\n best_corr = highest_corr\n best_result = result\n best_correlations = correlations\n best_template = template\n initial = False\n \n if result:\n break\n \n # plot some light curves and their correlations at random based on prediction\n if (best_result != pos_signal and np.random.random() > .98) or (pos_signal and np.random.random() > .99):\n lc_folder = \"./{}/lc{}\".format(result_foldername, counter)\n if not os.path.exists(lc_folder):\n os.mkdir(lc_folder)\n mlc.plot_lc(lcur, P, M_BH, i, M_S, filename=\"{}/lcur{}.pdf\".format(lc_folder, counter), EV=EV if pos_signal else None, Beam=Beam if pos_signal else None, SL=SL if pos_signal else None)\n mlc.plot_lc(flat_lcur, P, M_BH, i, M_S, filename=\"{}/flat_lcur{}.pdf\".format(lc_folder, counter))\n mlc.plot_corr(best_correlations, P, M_BH, i, M_S, threshold, noise, \"{}/corr{}.pdf\".format(lc_folder, counter))\n counter += 1\n \n # bin result depending on parameter values\n if pos_signal or best_result:\n i_binned, P_binned, mbh_binned, ms_binned = False, False, False, False\n for k in range(num_bins-1,-1,-1):\n if not i_binned and cosi >= cosi_bins[k]:\n inclinations[k].append(i)\n i_actual[k].append(pos_signal)\n i_predicted[k].append(result)\n if not P_binned and P >= P_bins[k]:\n periods[k].append(i)\n P_actual[k].append(pos_signal)\n P_predicted[k].append(result)\n if not mbh_binned and M_BH >= mbh_bins[k]:\n mbhs[k].append(i)\n mbh_actual[k].append(pos_signal)\n mbh_predicted[k].append(result)\n if not ms_binned and M_S >= ms_bins[k]:\n mss[k].append(i)\n ms_actual[k].append(pos_signal)\n ms_predicted[k].append(result)\n if all([i_binned, P_binned, mbh_binned, ms_binned]):\n break\n \n threshold_binned, noise_binned = False, False\n for k in range(num_bins-1,-1,-1):\n if not threshold_binned and threshold >= threshold_bins[k]:\n thresholds[k].append(i)\n threshold_actual[k].append(pos_signal)\n threshold_predicted[k].append(result)\n if not noise_binned and noise == altered_noise_bins[k]:\n noises[k].append(i)\n noise_actual[k].append(pos_signal)\n noise_predicted[k].append(result)\n if noise_binned and threshold_binned:\n break\n \n total_actual.append(pos_signal)\n total_predicted.append(best_result) \n \n # perform completeness analysis\n if z%1000 == 0 and z != 0:\n print('{} simulations complete'.format(z))\n prefix = \"./{}/\".format(result_foldername)\n plot_completeness(r'$\\alpha$', threshold_bins, threshold_actual, threshold_predicted, num_bins, prefix + 'alpha', scale=True)\n plot_completeness('Noise [ppm]', noise_bins, noise_actual, noise_predicted, num_bins, prefix + 'noise', scale=True)\n plot_completeness('cosi', cosi_bins, i_actual, i_predicted, num_bins, prefix + 'cosi', scale=True)\n plot_completeness('Period [days]', P_bins, P_actual, P_predicted, num_bins, prefix + 'period')\n plot_completeness(r'$M_{BH} [M_{\\odot}]$', mbh_bins, mbh_actual, mbh_predicted, num_bins, prefix + 'mbh')\n plot_completeness(r'$M_{\\star} [M_{\\odot}]$', ms_bins, ms_actual, ms_predicted, num_bins, prefix + 'ms')\n \n end = time.time()\n print(\"{} minutes\".format(round((end - start)/60, 2)))\n \n return total_actual, total_predicted\n \n else:\n counter = 1\n num_files = 0\n flags = ['SL', 'edge', 'transit', 'flare', 'highSL', 'ambiguousSL', 'ambiguousFlare', 'centroid']\n results = {flag: set() for flag in flags}\n if not os.path.exists(directory+result_foldername):\n os.mkdir(directory+result_foldername)\n for flag in flags:\n os.mkdir(directory + result_foldername + '/' + flag)\n \n for filename in 
os.listdir(directory):\n            num_files += 1\n            if num_files%500 == 0:\n                print('{} files completed'.format(num_files))\n            \n            if filename.endswith(\".fits\"):\n                fits_file = directory + filename\n                try:\n                    with fits.open(fits_file, mode=\"readonly\", memmap=False) as hdulist:\n                        tess_bjds = hdulist[1].data['TIME']\n                        #sap_fluxes = hdulist[1].data['SAP_FLUX']\n                        pdcsap_fluxes = hdulist[1].data['PDCSAP_FLUX']\n                        centroid = hdulist[1].data['MOM_CENTR1']\n                except Exception:\n                    print('Could not open file: {}'.format(filename))\n                    continue\n                \n                # Create a light curve object with the file data and run pipeline on it\n                lcur_object = Light_Curve(tess_bjds, pdcsap_fluxes, centroid, templates)\n                lcur_object.run_pipeline()\n                \n                if lcur_object.result:\n                    results[lcur_object.flag].add(filename)\n                    folder = '{}{}/{}'.format(directory, result_foldername, lcur_object.flag)\n                    pdf = pdfs.PdfPages('{}/light_curve{}.pdf'.format(folder, counter))\n                    counter += 1\n                    \n                    # create plot for full light curve data\n                    fig, ax = plt.subplots(5, sharex=True, figsize=(6, 10))\n                    #fig.suptitle(filename)\n                    \n                    # plot light curve\n                    ax[0].set_title(filename)\n                    ax[0].plot(lcur_object.valid_times, lcur_object.valid_fluxes, 'ko', rasterized=True, markersize=1)\n                    ax[0].set_ylabel('PDCSAP Flux')\n                    \n                    # plot flat light curve\n                    ax[1].plot(lcur_object.valid_times, lcur_object.flat_lcur, 'ko', rasterized=True, markersize=1)\n                    ax[1].set_ylabel('Relative Flux')\n                    \n                    # plot correlation\n                    ax[2].plot(lcur_object.valid_times[:len(lcur_object.correlations)], lcur_object.correlations, 'ko', rasterized=True, markersize=1)\n                    ax[2].plot([lcur_object.valid_times[0], lcur_object.valid_times[len(lcur_object.correlations)-1]], [Light_Curve.alpha, Light_Curve.alpha], '--', color='orange', rasterized=True)\n                    if lcur_object.flag == 'highSL':\n                        ax[2].plot([lcur_object.valid_times[0], lcur_object.valid_times[len(lcur_object.correlations)-1]], [Light_Curve.beta, Light_Curve.beta], 'b--', rasterized=True)\n                    ax[2].set_ylabel('Correlation')\n                    \n                    # plot centroid\n                    ax[3].plot(lcur_object.valid_times, lcur_object.valid_centroid, 'ko', rasterized=True, markersize=1)\n                    ax[3].set_ylabel('Centroid')\n                    \n                    # plot flat centroid\n                    ax[4].plot(lcur_object.valid_times, lcur_object.flat_centroid, 'ko', rasterized=True, markersize=1)\n                    ax[4].set_ylabel('Relative Centroid')\n                    ax[4].set_xlabel('Time [days]')\n                    \n                    plt.tight_layout()\n                    pdf.savefig(fig)\n                    plt.close()\n                    \n                    # zoomed in plot on location of each real positive detection\n                    for detection in lcur_object.real_detections:\n                        plot_detection(detection[0], lcur_object.window, 'SL', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    # zoomed in plot on location of each ambiguous positive detection\n                    for detection in lcur_object.ambiguous_real:\n                        plot_detection(detection[0], lcur_object.window, 'Ambiguous SL', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    # zoomed in plot on location of each ambiguous flare detection\n                    for detection in lcur_object.ambiguous_flare:\n                        plot_detection(detection[0], lcur_object.window, 'Ambiguous Flare', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    # zoomed in plot on location of each flare detection\n                    for detection in lcur_object.flare_detections:\n                        plot_detection(detection[0], lcur_object.window, 'Flare', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    # zoomed in plot on location of each centroid detection\n                    for detection in lcur_object.centroid_detections:\n                        plot_detection(detection[0], lcur_object.window, 'Centroid', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    pdf.close()\n        \n        # make a pie chart of the distribution of light curves in each bin\n        pie_slices = [len(results[flag]) for flag in flags]\n        plt.figure()\n        plt.title('Distribution of Positive Detections')\n        plt.pie(pie_slices, labels=flags)\n        plt.savefig(directory + result_foldername + \"/distribution.pdf\")\n        plt.close()\n    \n\ndef plot_completeness(variable, bins, actual, predicted, num_bins, path_prefix, scale=False):\n    accs = []\n    pres = []\n    recs = []\n    F1s = []\n    for l in range(num_bins):\n        cm, acc, pre, rec, F1 = cf.confusion_matrix(actual[l], predicted[l])\n        accs.append(acc)\n        pres.append(pre)\n        recs.append(rec)\n        F1s.append(F1)\n    \n    # plot accuracy\n    plt.figure()\n    plt.xlabel(variable)\n    plt.ylabel('Accuracy')\n    if scale:\n        plt.plot([b for b in bins], accs, 'ko', rasterized=True)\n    else:\n        plt.plot([str(round(b, 2)) for b in bins], accs, 'ko', rasterized=True)\n    filename = '{}_accuracy.pdf'.format(path_prefix)\n    plt.tight_layout()\n    plt.savefig(filename)\n    plt.close()\n    \n    #plot precision\n    plt.figure()\n    plt.xlabel(variable)\n    plt.ylabel('Precision')\n    if scale:\n        plt.plot([b for b in bins], pres, 'ko', rasterized=True)\n    else:\n        plt.plot([str(round(b, 2)) for b in bins], pres, 'ko', rasterized=True)\n    filename = '{}_precision.pdf'.format(path_prefix)\n    plt.tight_layout()\n    plt.savefig(filename)\n    plt.close()\n    \n    # plot recall\n    plt.figure()\n    plt.xlabel(variable)\n    plt.ylabel('Recall')\n    if scale:\n        plt.plot([b for b in bins], recs, 'ko', rasterized=True)\n    else:\n        plt.plot([str(round(b, 2)) for b in bins], recs, 'ko', rasterized=True)\n    filename = '{}_recall.pdf'.format(path_prefix)\n    plt.tight_layout()\n    plt.savefig(filename)\n    plt.close()\n    \n    # plot F1\n    plt.figure()\n    plt.xlabel(variable)\n    plt.ylabel('F1')\n    if scale:\n        plt.plot([b for b in bins], F1s, 'ko', rasterized=True)\n    else:\n        plt.plot([str(round(b, 2)) for b in bins], F1s, 'ko', rasterized=True)\n    filename = '{}_F1.pdf'.format(path_prefix)\n    plt.tight_layout()\n    plt.savefig(filename)\n    plt.close()\n\ndef noise_test(result_foldername):\n    if not os.path.exists(result_foldername):\n        os.mkdir(result_foldername)\n    noise_values = np.array([0,50,100,150,200,300,400,500,750,1000])  \n    altered_noise = noise_values * 10**(-6) + 10**(-9)\n    actuals = []\n    predicteds = []\n    for noise in altered_noise:\n        actual, predicted = mf_pipeline(None, result_foldername, noise=noise, mock=True, num_simulations=1000)\n        actuals.append(actual)\n        predicteds.append(predicted)\n    \n    plot_completeness('Noise [ppm]', noise_values, actuals, predicteds, 10, \"./{}/noise\".format(result_foldername), scale=True)\n    return actuals, predicteds\n\ndef alpha_test(result_foldername):\n    if not os.path.exists(result_foldername):\n        os.mkdir(result_foldername)\n    alpha_values = 
np.array([.005*i for i in range(1, 11)]) \n    actuals = []\n    predicteds = []\n    for alpha in alpha_values:\n        actual, predicted = mf_pipeline(None, result_foldername, mock=True, num_simulations=1000, threshold=alpha)\n        actuals.append(actual)\n        predicteds.append(predicted)\n    \n    plot_completeness('Alpha', alpha_values, actuals, predicteds, 10, \"./{}/alpha\".format(result_foldername), scale=True)\n    return actuals, predicteds\n    \ndef plot_detection(detection, window, flag, pdf, best_template, flare_template, flare_time_window, times, lcur, flat_lcur, correlations, centroid, flat_centroid):\n    fig, ax = plt.subplots(5, sharex=True, figsize=(6, 10))\n    time_window = times[detection:detection+window]\n    \n    # plot light curve window\n    detection_sample = lcur[detection:detection+window]\n    ax[0].set_title('{} Detection at t = {}'.format(flag, round(times[detection],2)))\n    ax[0].plot(time_window, detection_sample, 'ko', rasterized=True, markersize=2)\n    ax[0].set_ylabel('PDCSAP Flux')\n    \n    # plot flat light curve window\n    flat_detection = mlc.offset_and_normalize(flat_lcur[detection:detection+window])\n    ax[1].plot(time_window, flat_detection, 'ko', rasterized=True, markersize=2)\n    ax[1].plot(time_window, best_template, 'bo', rasterized=True, markersize=1)\n    if flare_template is not None:\n        ax[1].plot(flare_time_window, flare_template, 'go', rasterized=True, markersize=1)\n        ax[1].legend([\"Detection\", \"Gaussian\", \"Flare\"])\n    else:\n        ax[1].legend([\"Detection\", \"Gaussian\"])\n    ax[1].set_ylabel('Relative Flux')\n    \n    # plot correlation window\n    correlation_window = correlations[detection-window//2:detection+window-window//2]\n    ax[2].plot(times[detection:detection+window], correlation_window, 'ko', rasterized=True, markersize=2)\n    ax[2].set_ylabel('Correlation')\n    \n    # plot centroid\n    centroid_window = centroid[detection:detection+window]\n    ax[3].plot(time_window, centroid_window, 'ko', rasterized=True, markersize=2)\n    ax[3].set_ylabel('Centroid')\n    \n    # plot flat centroid\n    flat_centroid_window = flat_centroid[detection:detection+window]\n    ax[4].plot(time_window, flat_centroid_window, 'ko', rasterized=True, markersize=2)\n    ax[4].set_ylabel('Relative Centroid')\n    \n    plt.tight_layout()\n    pdf.savefig(fig)\n    plt.close()\n\n\n\nfoldername = input(\"Input name of new results folder: \")\nos.mkdir(foldername)\n\ntemplates = []\nwidths = [5*j + 15 for j in range(10)] + [10*j + 70 for j in range(4)]\nfor width in widths: \n    templates.append(mlc.offset_and_normalize(mlc.generate_template(1, width)))\n    \nfor sector in range(15, 29):\n    flags = ['SL', 'edge', 'transit', 'flare', 'highSL', 'ambiguousSL', 'ambiguousFlare', 'centroid']\n    results = {flag: set() for flag in flags}\n    if not os.path.exists(\"{}/Sector{}\".format(foldername, sector)):\n        os.mkdir(\"{}/Sector{}\".format(foldername, sector))\n        os.mkdir(\"{}/Sector{}/SL_Files\".format(foldername, sector))\n        for flag in flags:\n            os.mkdir(\"{}/Sector{}/{}\".format(foldername, sector, flag))\n    \n    download_file = \"tesscurl_sector_{}_lc.sh\".format(sector)\n    with open(download_file) as curl_file:\n        for command in curl_file:\n            split_command = command.split(\" \")\n            if len(split_command) > 1:\n                os.system(command)\n                lcur_file = split_command[5]\n                tickID = lcur_file.split(\"-\")[2]\n                \n                try:\n                    with fits.open(lcur_file, mode=\"readonly\", memmap=False) as hdulist:\n                        tess_bjds = hdulist[1].data['TIME']\n                        #sap_fluxes = hdulist[1].data['SAP_FLUX']\n                        pdcsap_fluxes = hdulist[1].data['PDCSAP_FLUX']\n                        centroid = hdulist[1].data['MOM_CENTR1']\n                except:\n                    print('Could not open file: {}'.format(lcur_file))\n                    continue\n                \n                
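# (Annotation) Light_Curve.run_pipeline below slides the Gaussian templates built above across
# the flattened light curve and flags windows whose correlation crosses Light_Curve.alpha.
# A standalone sketch of that matched-filter idea; offset_and_normalize is a hypothetical
# stand-in for mlc's helper of the same name, not the module's actual code:

import numpy as np

def offset_and_normalize(x):
    x = np.asarray(x, dtype=float) - np.mean(x)
    n = np.linalg.norm(x)
    return x / n if n > 0 else x

def correlate(flat_lcur, template):
    # Normalized dot products in [-1, 1], peaking where the window matches the template shape.
    w = len(template)
    return np.array([np.dot(offset_and_normalize(flat_lcur[i:i + w]),
                            offset_and_normalize(template))
                     for i in range(len(flat_lcur) - w + 1)])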
# Create a light curve object with the file data and run pipeline on it\n                lcur_object = Light_Curve(tess_bjds, pdcsap_fluxes, centroid, templates)\n                lcur_object.run_pipeline()\n                \n                if lcur_object.result:\n                    results[lcur_object.flag].add(tickID)\n                    folder = '{}/Sector{}/{}'.format(foldername, sector, lcur_object.flag)\n                    pdf = pdfs.PdfPages('{}/lcur_{}.pdf'.format(folder, tickID))\n                    \n                    # create plot for full light curve data\n                    fig, ax = plt.subplots(5, sharex=True, figsize=(6, 10))\n                    #fig.suptitle(filename)\n                    \n                    # plot light curve\n                    ax[0].set_title(\"Light Curve {}\".format(tickID))\n                    ax[0].plot(lcur_object.valid_times, lcur_object.valid_fluxes, 'ko', rasterized=True, markersize=1)\n                    ax[0].set_ylabel('PDCSAP Flux')\n                    \n                    # plot flat light curve\n                    ax[1].plot(lcur_object.valid_times, lcur_object.flat_lcur, 'ko', rasterized=True, markersize=1)\n                    ax[1].set_ylabel('Relative Flux')\n                    \n                    # plot correlation\n                    ax[2].plot(lcur_object.valid_times[:len(lcur_object.correlations)], lcur_object.correlations, 'ko', rasterized=True, markersize=1)\n                    ax[2].plot([lcur_object.valid_times[0], lcur_object.valid_times[len(lcur_object.correlations)-1]], [Light_Curve.alpha, Light_Curve.alpha], '--', color='orange', rasterized=True)\n                    if lcur_object.flag == 'highSL':\n                        ax[2].plot([lcur_object.valid_times[0], lcur_object.valid_times[len(lcur_object.correlations)-1]], [Light_Curve.beta, Light_Curve.beta], 'b--', rasterized=True)\n                    ax[2].set_ylabel('Correlation')\n                    \n                    # plot centroid\n                    ax[3].plot(lcur_object.valid_times, lcur_object.valid_centroid, 'ko', rasterized=True, markersize=1)\n                    ax[3].set_ylabel('Centroid')\n                    \n                    # plot flat centroid\n                    ax[4].plot(lcur_object.valid_times, lcur_object.flat_centroid, 'ko', rasterized=True, markersize=1)\n                    ax[4].set_ylabel('Relative Centroid')\n                    ax[4].set_xlabel('Time [days]')\n                    \n                    plt.tight_layout()\n                    pdf.savefig(fig)\n                    plt.close()\n                    \n                    # zoomed in plot on location of each real positive detection\n                    for detection in lcur_object.real_detections:\n                        plot_detection(detection[0], lcur_object.window, 'SL', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    # zoomed in plot on location of each ambiguous positive detection\n                    for detection in lcur_object.ambiguous_real:\n                        plot_detection(detection[0], lcur_object.window, 'Ambiguous SL', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    # zoomed in plot on location of each ambiguous flare detection\n                    for detection in lcur_object.ambiguous_flare:\n                        plot_detection(detection[0], lcur_object.window, 'Ambiguous Flare', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    # zoomed in plot on location of each flare detection\n                    for detection in lcur_object.flare_detections:\n                        plot_detection(detection[0], lcur_object.window, 'Flare', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    # zoomed in plot on location of each centroid detection\n                    for detection in 
lcur_object.centroid_detections:\n                        plot_detection(detection[0], lcur_object.window, 'Centroid', pdf, detection[1], \n                                       detection[2], detection[3], lcur_object.valid_times, lcur_object.valid_fluxes, \n                                       lcur_object.flat_lcur, lcur_object.correlations, lcur_object.valid_centroid, \n                                       lcur_object.flat_centroid)\n                    \n                    pdf.close()\n                \n                if lcur_object.flag in [\"SL\", \"highSL\"]:\n                    os.system(\"mv {} {}/Sector{}/SL_Files\".format(lcur_file, foldername, sector))\n                else:\n                    os.system(\"rm -f {}\".format(lcur_file))\n    \n    # make a pie chart of the distribution of light curves in each bin\n    pie_slices = [len(results[flag]) for flag in flags]\n    plt.figure()\n    plt.title('Distribution of Positive Detections')\n    plt.pie(pie_slices, labels=flags)\n    plt.savefig(\"{}/Sector{}/distribution.pdf\".format(foldername, sector))\n    plt.close()\n    ","sub_path":"troia/mf_pipeline_north.py","file_name":"mf_pipeline_north.py","file_ext":"py","file_size_in_byte":41621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"377854914","text":"\r\nfrom tkinter import * # * imports only the main module, not submodules\r\nfrom tkinter import ttk, messagebox # messagebox provides pop-up message dialogs\r\nimport csv\r\nfrom datetime import datetime #import current time stamp\r\n\r\n## GUI Structure ##\r\n#GUI\r\n    #Tab1 (T1)\r\n\t#Frame1 (F1)\r\n\t    #Label1 (L1)\r\n\t#Element1 (E1)\r\n    #Tab2\r\n\r\n# grid works with column/row ==> cannot be mixed with pack\r\n# pack stacks widgets from top to bottom\r\n# place positions widgets by exact x,y coordinates\r\n\r\n\r\nGUI = Tk()\r\nGUI.title('Expenses Recording v.1.0')\r\nGUI.geometry('650x750+500+30')\r\n\r\n\r\n\r\n############### Create Menu Bar ########################\r\nmenubar = Menu(GUI) #Menu is from import *\r\nGUI.config(menu=menubar)\r\n\r\n# File menu\r\nfilemenu = Menu(menubar,tearoff=0) #tearoff=0 disables the detachable (tear-off) menu entry\r\nmenubar.add_cascade(label='File',menu=filemenu) # Define : File menu name\r\nfilemenu.add_command(label='Import CSV')\r\nfilemenu.add_command(label='Export to Googlesheet')\r\n\r\n# Help menu\r\n\r\ndef About():\r\n\tprint('About Menu')\r\n\tmessagebox.showinfo('About','Created by P.Wijak')\r\n\r\nhelpmenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Help',menu=helpmenu) # Define : Help menu name\r\nhelpmenu.add_command(label='About Information',command = About) #command calls the About function\r\n\r\n# Donate menu\r\ndef Donate():\r\n\tmessagebox.showinfo('About','If you would like to donate, please contact me\\n \\t Thank you')\r\n\r\ndonatemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Donate',menu=donatemenu) # Define : Donate menu name\r\ndonatemenu.add_command(label='Donate Information',command = Donate) #command calls the Donate function\r\n########################################################\r\n\r\n\r\n#Create Tabs : Notebook\r\nTab = ttk.Notebook(GUI)\r\nT1 = Frame(Tab) #Can add width and height\r\nT2 = Frame(Tab)\r\nTab.pack(fill=BOTH, expand = 1) #fill=BOTH : stretch along both x and y | expand=1 : take any extra space, used together with fill\r\n\r\n#Add photo into the tab\r\nt1_icon = PhotoImage(file='t1_expense.png') #.subsample(2) shrinks the image by a factor of 2; works with .png only\r\nt2_listicon = PhotoImage(file='t2_expenselist.png')\r\n\r\n#Tab with picture\r\nTab.add(T1, text=f'{\"Add Expense\" : ^{50}}', image=t1_icon,compound='top') \r\nTab.add(T2, text=f'{\"Expense list\" : ^{50}}', image=t2_listicon,compound='top')\r\n#text = f'{}' ; the f-string spec ^50 centers the name within a width of 50\r\n#compound controls where the image is placed relative to the text\r\n
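# (Annotation) The mf_pipeline_north.py record above calls cf.confusion_matrix(actual, predicted)
# and expects (cm, acc, pre, rec, F1) back. That helper is not shown anywhere in the record, so
# this is a hedged sketch of the assumed contract for binary labels, not the authors' code:

import numpy as np

def confusion_matrix(actual, predicted):
    actual, predicted = np.asarray(actual), np.asarray(predicted)
    tp = int(np.sum((actual == 1) & (predicted == 1)))
    tn = int(np.sum((actual == 0) & (predicted == 0)))
    fp = int(np.sum((actual == 0) & (predicted == 1)))
    fn = int(np.sum((actual == 1) & (predicted == 0)))
    cm = np.array([[tn, fp], [fn, tp]])
    acc = (tp + tn) / max(len(actual), 1)
    pre = tp / max(tp + fp, 1)       # precision
    rec = tp / max(tp + fn, 1)       # recall
    f1 = 2 * pre * rec / max(pre + rec, 1e-12)
    return cm, acc, pre, rec, f1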
F1 = Frame(T1)\r\nF1.pack()\r\n#F1.place(x=20,y=50)\r\n\r\ndays = {'Mon' : 'จันทร์',\r\n\t'Tue' : 'อังคาร',\r\n\t'Wed' : 'พุธ',\r\n\t'Thu' : 'พฤหัส',\r\n\t'Fri' : 'ศุกร์',\r\n\t'Sat' : 'เสาร์',\r\n\t'Sun' : 'อาทิตย์'}\r\n\r\n\r\n\r\ndef Save(event = None): #event = None sets the default value to None\r\n\texpense = v_expense.get() #.get() pulls the value out of v_expense = StringVar()\r\n\tprice = v_price.get()\r\n\tpiece = v_piece.get()\r\n\r\n\tif expense == '':\r\n\t\t#print('ไม่มีข้อมูล กรุณากรอกข้อมูลใหม่')\r\n\t\tprint('No Data')\r\n\t\tmessagebox.showwarning('Error','กรุณากรอกข้อมูลค่าใช้จ่ายให้ครบ')\r\n\t\tv_expense.set('') #Clear expense to ' '\r\n\t\tv_price.set('') #Clear price to ' '\r\n\t\tv_piece.set('')\r\n\t\t#E1.focus() # Move the cursor back to the expense field\r\n\t\treturn\r\n\r\n\telif price == '':\r\n\t\tmessagebox.showwarning('Error','กรุณากรอกข้อมูลราคาให้ครบ')\r\n\t\tv_expense.set('') #Clear expense to ' '\r\n\t\tv_price.set('') #Clear price to ' '\r\n\t\tv_piece.set('')\r\n\t\t#E1.focus()\r\n\t\treturn\r\n\r\n\telif piece == '':\r\n\t\tmessagebox.showwarning('Error','กรุณากรอกข้อมูลจำนวนให้ครบ')\r\n\t\tv_expense.set('') #Clear expense to ' '\r\n\t\tv_price.set('') #Clear price to ' '\r\n\t\tv_piece.set('')\r\n\t\t#E1.focus()\r\n\t\treturn\r\n\r\n\r\n\ttry :\r\n\t\toverall = float(price)*int(piece)\r\n \r\n\t\t#Clear\r\n\t\tv_expense.set('') #Clear expense to ' '\r\n\t\tv_price.set('') #Clear price to ' '\r\n\t\tv_piece.set('')\r\n\t\t\r\n\t\t#Time setting\r\n\t\tdt = datetime.now() # Time Stamp\r\n\t\ttoday = dt.strftime('%a') #days['Mon'] = 'จันทร์'\r\n\t\tdate_time = dt.strftime('%Y-%m-%d %H:%M:%S') # Show Date and Time Stamp\r\n\t\tdate_time = days[today]+'-'+date_time\r\n\r\n\t\tprint(' Date : {} \\n รายการ: {} \\n ราคา: {} บาท \\n จำนวน: {} ชิ้น\\n รวมทั้งหมด: {} บาท\\n บันทึกแล้ว'.format(date_time,expense,price,piece,overall))\r\n\t\ttext = ' Date : {} \\n รายการ: {} \\n ราคา: {} บาท \\n จำนวน: {} ชิ้น\\n รวมทั้งหมด: {} บาท\\n บันทึกแล้ว'.format(date_time,expense,price,piece,overall)\r\n\t\t#text = text + 'จำนวน ...'\r\n\t\tv_result.set(text) #Show result\r\n\r\n\t\t#Save the record to csv\r\n\t\twith open('Expense data.csv','a',encoding='utf-8',newline='') as f: #'a' appends to the existing data (Append), 'w' would overwrite the old file\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t#utf-8 allows saving Thai text\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t#with opens the file and closes it automatically\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t#newline='' keeps blank lines out of the file\r\n\t\t\tfw = csv.writer(f) #create the writer used to append the row\r\n\t\t\tdata = [date_time,expense,price,piece,overall]\r\n\t\t\tfw.writerow(data)\r\n\r\n\t\t#Return the cursor to E1\r\n\t\tE1.focus()\r\n\t\tupdate_table()\r\n\t\t\r\n\r\n\texcept Exception as e:\r\n\t\t#print('ERROR: {}'.format(e)) \r\n\t\tprint('ERROR:',e) #Show the error as e\r\n\t\t#messagebox.showerror('Error','คุณกรอกข้อมูลผิด กรุณากรอกข้อมูลใหม่ ') # the first argument is the pop-up title \r\n\t\tmessagebox.showwarning('Error','คุณกรอกข้อมูลผิด กรุณากรอกข้อมูลใหม่ ')\r\n\t\t#messagebox.showinfo('Error','คุณกรอกข้อมูลผิด กรุณากรอกข้อมูลใหม่ ')\r\n\r\n\t\t#Clear data\r\n\t\tv_expense.set('') #Clear expense to ' '\r\n\t\tv_price.set('') #Clear price to ' '\r\n\t\tv_piece.set('')\r\n\r\n\r\n#Allow submitting by pressing the Enter key\r\nGUI.bind('<Return>',Save) #def Save must be declared as Save(event=None) for this to work\r\n\t\t\t\t\t#GUI.bind listens for the Return key press\r\n\t\r\nFONT1 = (None,20) # None is the font family, 20 is the size\r\n\r\n#---------Image------------\r\nmain_icon = 
PhotoImage(file='icon_money.png')\r\nlogo = Label(F1,image=main_icon)\r\nlogo.pack()\r\n\r\n#------text1 : expense item---------\r\nL = ttk.Label(F1,text = 'รายการค่าใช้จ่าย',font = FONT1).pack()\r\n\r\nv_expense = StringVar() # StringVar() is a special variable for holding GUI data\r\nE1 = ttk.Entry(F1,textvariable=v_expense,font = FONT1)\r\nE1.pack()\r\n#--------------------\r\n\r\n#------text2 : price---------\r\nL = ttk.Label(F1,text = 'ราคา (บาท)',font = FONT1).pack()\r\n\r\nv_price= StringVar() # StringVar() is a special variable for holding GUI data\r\nE2 = ttk.Entry(F1,textvariable=v_price,font = FONT1)\r\nE2.pack()\r\n#--------------------\r\n\r\n#------text3 : quantity---------\r\nL = ttk.Label(F1,text = 'จำนวน (ชิ้น)',font = FONT1).pack()\r\n\r\nv_piece= StringVar() # StringVar() is a special variable for holding GUI data\r\nE3 = ttk.Entry(F1,textvariable=v_piece,font = FONT1)\r\nE3.pack()\r\n#--------------------\r\n\r\nb1_icon = PhotoImage(file='b_save.png')\r\n\r\nB2 = ttk.Button(F1,text=f'{\"Save\": >{15}}', command=Save,image=b1_icon,compound='left')\r\nB2.pack(pady=20, ipadx=50,ipady=20)\r\n\r\n#Show the result on the screen\r\nv_result = StringVar()\r\nv_result.set('-------ผลลัพธ์--------')\r\nresult = ttk.Label(F1, textvariable=v_result, font=FONT1, foreground='green')\r\n#result = ttk.Label(F1, textvariable=v_result, font=FONT1, fg='green') >> fg works with a plain Label\r\nresult.pack(pady=20)\r\n\r\n\r\n############# TAB2 ###################\r\n\r\ndef read_csv(): #Function with 'with'\r\n\t\r\n\t#global rs #Define global variable\r\n\r\n\twith open('Expense data.csv',newline='',encoding='utf-8') as f: #mode 'a' or 'w' is for writing; utf-8 lets us read Thai text\r\n\t\tfr = csv.reader(f) #fr is file reader\r\n\t\tdata = list(fr) #Materialize the reader into a list so the rows can be reused\r\n\t\t# print(data)\r\n\t\t# print(data[0][0])\r\n\t\t# for a,b,c,d,e in data: \r\n\t\t# \tprint(b)\t\t\t#Can print just the column of interest from each row\r\n\treturn data #Return the data so it can be used later\r\n\r\n\t# Function without 'with' (errors if the user still has the file open)\r\n\t# f = open('savedata.csv',newline='',encoding='utf-8')\r\n\t# fr = csv.reader(f)\r\n\t# f.close() #Must close the file before running again\r\n\r\n# read_csv()\r\n# rs = read_csv()\r\n# print(rs)\r\n\r\n#Table\r\nL = ttk.Label(T2,text = 'ตารางแสดงผลลัพธ์ทั้งหมด',font = FONT1).pack(pady = 20)\r\n\r\nheader = ['วัน-เวลา','รายการ','ค่าใช้จ่าย','จำนวน','รวม']\r\nresulttable = ttk.Treeview(T2, columns=header, show='headings',height=20) # show='headings' displays only the data columns | height is the number of visible rows\r\nresulttable.pack()\r\n#Show table without header and fixed size\r\n\r\n### Manual header definition\r\n# resulttable.heading(header[0],text='header[0]')\r\n# resulttable.heading(header[1],text='header[1]')\r\n# resulttable.heading(header[2],text='header[2]')\r\n# resulttable.heading(header[3],text='header[3]')\r\n# resulttable.heading(header[4],text='header[4]')\r\n\r\n### For-loop header definition\r\n# for i in range(len(header)):\r\n# \tresulttable.heading(header[i],text='header[i]')\r\n\r\n### Run data in list\r\nfor hd in header:\r\n\tresulttable.heading(hd,text=hd)\r\n\r\nheaderwidth = [150,170,80,80,80] #The unit is pixels\r\n\r\n# resulttable.column('วัน-เวลา',width = 10) #Define and format a header column\r\n\r\n# for i in range(len(header)):\r\n# \tprint(header[i], headerwidth[i])\r\n\r\n# zip(header,headerwidth) #pairs the two lists element-wise\r\n# list(zip(header,headerwidth))\r\n\r\n# enumerate() 
#pairs each item with its index\r\n# for i,d in enumerate(zip(header,headerwidth)):\r\n# \tprint(i,d)\r\n\r\nfor hd,W in zip(header, headerwidth): #zip pairs the two lists element-wise\r\n\tresulttable.column(hd,width=W)\r\n\r\n\r\n# def update_record():\r\n# \tgetdata = read_csv()\r\n# \tv_allrecord.set('') #Reset the old data\r\n# \ttext = ''\r\n# \tfor d in getdata:\r\n# \t\ttxt = '{}---{}---{}---{}---{}\\n'.format(d[0],d[1],d[2],d[3],d[4]) #Format each row's fields\r\n# \t\ttext = text + txt\r\n\r\n# \tv_allrecord.set(text)\r\n\r\n# # Put data in the table\r\n# v_allrecord = StringVar()\r\n# v_allrecord.set('----------All Record----------')\r\n# Allrecord = ttk.Label(T2,textvariable=v_allrecord,font=(None,15),foreground='green')\r\n# Allrecord.pack()\r\n\r\n######################################################\r\n# Inserting rows\r\n# resulttable.insert('','end',value=['จันทร์','น้ำดื่ม',30,5,150]) #value must contain as many fields as the header / 'end' appends so older rows stay on top, while 0 puts the newest row first\r\n# resulttable.insert('',0,value=['จันทร์','น้ำดื่ม',30,5,150]) \r\n\r\ndef update_table():\r\n\r\n\t# for c in resulttable.get_children():\r\n\t# \tresulttable.delete(c)\r\n\tresulttable.delete(*resulttable.get_children()) # Delete the previous rows | * unpacks the children list\r\n\t\r\n\tgetdata = read_csv()\r\n\tfor dt in getdata:\r\n\t\tresulttable.insert('',0,value=dt) #'' is the root parent; 0 inserts at the top\r\n\r\n\r\n\r\nupdate_table()\r\n#print('GET CHILD:',resulttable.get_children())\r\n\r\n# update_record()\r\nGUI.bind('',lambda x: E2.focus())\r\nGUI.mainloop()\r\n","sub_path":"Expense.py","file_name":"Expense.py","file_ext":"py","file_size_in_byte":12567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"376678970","text":"import requests\nimport hashlib\nimport hmac\nimport csv\nimport base64\nimport json\nimport csv \nfrom collections import defaultdict\nimport copy\n\ndef hash_key(message):\n    a = str.encode(message, encoding=\"utf-8\")\n    hash = hmac.new(b'eRlUTImwkJqba#E', a, hashlib.sha1).digest()\n    b = base64.encodestring(hash)\n    b = b.rstrip()\n    return \"INTEGRACION grupo3:\" + b.decode(\"utf-8\")\n\ndef get_almacenes():\n    uri = 'https://integracion-2019-prod.herokuapp.com/bodega/'\n    headers={\"Content-Type\": \"application/json\", \"Authorization\": hash_key('GET')}\n    r = requests.get(uri+'almacenes', headers=headers)\n    return r.json()\n\ndef skusWithStock(x):\n    almacenes = {\n        'recepcion': '5cc7b139a823b10004d8e6d9',\n        'pulmon': '5cc7b139a823b10004d8e6dd',\n        'cocina': '5cc7b139a823b10004d8e6de',\n        'despacho': '5cc7b139a823b10004d8e6da',\n        'otro1': \"5cc7b139a823b10004d8e6dc\",\n        'otro2': \"5cc7b139a823b10004d8e6db\"\n    }\n    uri = 'https://integracion-2019-prod.herokuapp.com/bodega/'\n    headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key('GET'+almacenes[x]))}\n    r = requests.get(uri+'skusWithStock', headers=headers, params={\"almacenId\":almacenes[x]})\n    return r.json()\n\ndef ver_stock():\n    stock = defaultdict(lambda: 0) \n    for a in ['despacho', 'recepcion', 'cocina', 'pulmon', 'otro1', 'otro2']:\n        for i in skusWithStock(a):\n            stock[i['_id']] += i['total']\n    return stock\n\ndef minimos():\n    dict_minimos = {}\n    with open('Minimos.csv') as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=',')\n        line_count = 0\n        for row in csv_reader:\n            if line_count == 0:\n                line_count += 1\n            else:\n                dict_minimos[row[0]] = {\"nombre\": row[1], \"cantidad\": row[3]}\n                line_count += 1\n    return dict_minimos\n\ndef dict_ingredientes():\n    
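# Maps each product SKU to its recipe: a list of {sku_ingrediente, lote, cantidad} dicts parsed from Ingredientes.csv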
dict_ingredientes = defaultdict(list)\n    with open('Ingredientes.csv') as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=',')\n        line_count = 0\n        for row in csv_reader:\n            if line_count == 0:\n                line_count += 1\n            else:\n                dict_ingredientes[row[0]].append({\"sku_ingrediente\": row[2], \"lote\": int(float(row[6])), \"cantidad\": int(float(row[9]))})\n                line_count += 1\n    return dict_ingredientes\n\n# fabricarSinPago endpoint: manufacture without payment\ndef Fabricar(sku, cantidad):\n    uri = 'https://integracion-2019-prod.herokuapp.com/bodega/'\n    headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key(str('PUT'+sku+str(cantidad))))}\n    enviar = requests.put(uri+'fabrica/fabricarSinPago', headers=headers, json={\"sku\":sku, 'cantidad':cantidad})\n    return enviar.json()\n\ndef cual_falta(prod, cantidad, lote):\n    ing = dict_ingredientes()\n    necesitan = []\n    actuales = copy.deepcopy(ing[prod])\n    for i in actuales:\n        cantidad_necesaria = (int(cantidad / (lote)) +1) * i['cantidad']\n        i['cantidad_necesaria'] = cantidad_necesaria\n    materias_primas = defaultdict(lambda: 0)\n    while len(actuales) > 0:\n        actual = actuales.pop()\n        ing_actuales = ing[actual['sku_ingrediente']]\n        if len(ing_actuales) > 0:\n            for i in ing_actuales:\n                cantidad_necesaria = (int(actual['cantidad_necesaria'] / (i['lote'])) +1) * i['cantidad']\n                i['cantidad_necesaria'] = cantidad_necesaria\n                actuales.append(i)\n        else:\n            materias_primas[actual['sku_ingrediente']] += actual['cantidad_necesaria']\n    return materias_primas\n\ndef cual_falta_total():\n    mini = minimos()\n    ings = dict_ingredientes()\n    stock = ver_stock()\n    faltan = defaultdict(lambda: 0)\n    for i in mini:\n        if i in stock and stock[i] > int(mini[i]['cantidad']):\n            print('Ya hay suficiente')\n            print(i)\n        else:\n            if i in stock:\n                mini[i]['cantidad'] = str(int(mini[i]['cantidad'])- stock[i])\n            if len(ings[i])> 0:\n                necesario_x = cual_falta(i, int(mini[i]['cantidad']), int(int(ings[i][0]['lote'])))\n                for x in necesario_x:\n                    faltan[x] += necesario_x[x]\n            else:\n                faltan[i] += int(mini[i]['cantidad'])\n    # compare against current stock\n    for x in stock:\n        if x in faltan:\n            if faltan[x] < stock[x]:\n                faltan.pop(x)\n    return faltan\n\n\n\ndef esta_completo(sku, cantidad, lote):\n    almacenes = {\n        'recepcion': '5cc7b139a823b10004d8e6d9',\n        'pulmon': '5cc7b139a823b10004d8e6dd',\n        'cocina': '5cc7b139a823b10004d8e6de',\n        'despacho': '5cc7b139a823b10004d8e6da',\n        'otro1': \"5cc7b139a823b10004d8e6dc\",\n        'otro2': \"5cc7b139a823b10004d8e6db\"\n    }\n    todos = True\n    f = cual_falta(sku, cantidad, lote)\n    s = ver_stock()\n    for x in f:\n        if x not in s:\n            todos = False\n    return todos\n\n# Check which of the minimum-stock products have all their ingredients available\ndef ver_completos():\n    mini = minimos()\n    ings = dict_ingredientes()\n    completos = []\n    for i in mini:\n        if len(ings[i])> 0:\n            if esta_completo(i, int(mini[i]['cantidad']), int(ings[i][0]['lote'])):\n                completos.append(i)\n            else:\n                print('FALTA')\n                print(i)\n    return completos\n\ndef limpiar_despacho(origen, destino):\n    almacenes = {\n        'recepcion': '5cc7b139a823b10004d8e6d9',\n        'pulmon': '5cc7b139a823b10004d8e6dd',\n        'cocina': '5cc7b139a823b10004d8e6de',\n        'despacho': '5cc7b139a823b10004d8e6da',\n        'otro1': \"5cc7b139a823b10004d8e6dc\",\n        'otro2': \"5cc7b139a823b10004d8e6db\"\n    }\n    uri = 'https://integracion-2019-prod.herokuapp.com/bodega/'\n    headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key('GET'+almacenes[origen]))}\n    r = requests.get(uri+'skusWithStock', headers=headers, params={\"almacenId\":almacenes[origen]})\n    for i in r.json():\n        
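# Each matching stock unit is moved individually: one GET for the unit ids of this SKU, then one moveStock POST per unit.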
headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key('GET'+almacenes[origen]+str(i['_id'])))}\n r2 = requests.get(uri+'stock', headers=headers, params={\"almacenId\":almacenes[origen], 'sku':i['_id']})\n for x in r2.json():\n headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key(str('POST'+x['_id']+almacenes[destino])))}\n enviar = requests.post(uri+'moveStock', headers=headers, json={\"productoId\":x['_id'], 'almacenId':almacenes[destino]})\n print(enviar.json())\n\n\ndef fabricar_sku(sku, falta, lote):\n almacenes = {\n 'recepcion': '5cc7b139a823b10004d8e6d9',\n 'pulmon': '5cc7b139a823b10004d8e6dd',\n 'cocina': '5cc7b139a823b10004d8e6de',\n 'despacho': '5cc7b139a823b10004d8e6da',\n 'otro1': \"5cc7b139a823b10004d8e6dc\",\n 'otro2': \"5cc7b139a823b10004d8e6db\"\n }\n completo = sku\n s = ver_stock()\n print(completo)\n i = dict_ingredientes()\n ings = i[completo]\n todos = True\n for i in ings:\n if i['sku_ingrediente'] not in s:\n print(i)\n todos = False\n if todos:\n print('todos')\n for i in ings:\n mover = (int(falta/lote)) * i['cantidad'] + 1\n print(mover)\n for a in almacenes:\n uri = 'https://integracion-2019-prod.herokuapp.com/bodega/'\n headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key('GET'+almacenes[a]+i['sku_ingrediente']))}\n r = requests.get(uri+'stock', headers=headers, params={\"almacenId\":almacenes[a], 'sku':i['sku_ingrediente']})\n for x in r.json():\n headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key(str('POST'+x['_id']+almacenes['despacho'])))}\n if mover > 0:\n enviar = requests.post(uri+'moveStock', headers=headers, json={\"productoId\":x['_id'], 'almacenId':almacenes['despacho']})\n print(enviar.json())\n mover -= 1\n falta_lotes = (int(falta/lote) + 1 ) * lote\n orden = Fabricar(completo, falta_lotes)\n print(orden)\n orden['sku_destino'] = completo\n ordenes = {}\n ordenes[orden['sku']] = orden\n\n# Imprimir el stock por almacen\ndef imprimir_stock_almacen():\n almacenes = {\n 'recepcion': '5cc7b139a823b10004d8e6d9',\n 'pulmon': '5cc7b139a823b10004d8e6dd',\n 'cocina': '5cc7b139a823b10004d8e6de',\n 'despacho': '5cc7b139a823b10004d8e6da', \n 'otro1': \"5cc7b139a823b10004d8e6da\",\n 'otro2': \"5cc7b139a823b10004d8e6db\"\n }\n for x in almacenes:\n print(x)\n print(skusWithStock(x))\n\n# Ver cuales elementos tienen el stock minimo\ndef imprimir_estado():\n m = minimos()\n s = ver_stock()\n for i in m:\n print(str(i), s[i], int(m[i]['cantidad']), s[i] >= int(m[i]['cantidad']))\n\n# Enviar productos a otro grupo\ndef enviar_produtos(sku, almacenId, cantidad):\n almacenes = {\n 'recepcion': '5cc7b139a823b10004d8e6d9',\n 'pulmon': '5cc7b139a823b10004d8e6dd',\n 'cocina': '5cc7b139a823b10004d8e6de',\n 'despacho': '5cc7b139a823b10004d8e6da', \n 'otro1': \"5cc7b139a823b10004d8e6da\",\n 'otro2': \"5cc7b139a823b10004d8e6db\"\n }\n for a in almacenes:\n uri = 'https://integracion-2019-prod.herokuapp.com/bodega/'\n headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key('GET'+almacenes[a]+sku))}\n r = requests.get(uri+'stock', headers=headers, params={\"almacenId\":almacenes[a], 'sku':sku})\n for x in r.json():\n headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key(str('POST'+x['_id']+almacenes['despacho'])))}\n if cantidad > 0:\n enviar = requests.post(uri+'moveStock', headers=headers, json={\"productoId\":x['_id'], 'almacenId':almacenes['despacho']})\n print(enviar.json())\n cantidad -= 1\n uri = 
uri = 'https://integracion-2019-prod.herokuapp.com/bodega/'\n    headers={\"Content-Type\": \"application/json\", \"Authorization\": str(hash_key(str('POST'+x['_id']+almacenId)))}\n    enviar = requests.post(uri+'moveStockBodega', headers=headers, json={\"productoId\":x['_id'], 'almacenId':almacenId, 'precio': 1})\n    print(enviar.json())\n\n\n","sub_path":"stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":10619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"86665616","text":"from collections import defaultdict, Counter\nfrom itertools import chain, repeat\n\n\n# Using Default dict\ndef merge_defdict(*dicts):\n\t'''\n\tMerges all passed in dicts and increases the frequency of keys and returns the sorted version back\n\t'''\n\n\tsorted_dic = defaultdict(int)\n\tfor dic in dicts:\n\t\tfor k, v in dic.items():\n\t\t\tsorted_dic[k] += v\n\n\tsorted_dic = dict(sorted(sorted_dic.items(), key=lambda x: x[1], reverse=True))\n\treturn sorted_dic\n\n\n# Using Counter object\ndef merge_counter_v1(*dicts):\n\t'''\n\tMerges all passed in dicts and increases the frequency of keys and returns the sorted version back\n\t'''\n\n\tc = Counter()\n\tfor dic in dicts:\n\t\tc.update(dic)\n\treturn dict(c.most_common())\n\n\n# Using Counter and chain and repeat\ndef merge_counter_v2(*dicts):\n\t'''\n\tMerges all passed in dicts and increases the frequency of keys and returns the sorted version back\n\t'''\n\tp = (repeat(*item) for dic in dicts for item in dic.items())\n\treturn dict(Counter(chain.from_iterable(p)).most_common())\n\n\nif __name__ == \"__main__\":\n\td1 = {'python': 10, 'java': 3, 'c#': 8, 'javascript': 15}\n\td2 = {'java': 10, 'c++': 10, 'c#': 4, 'go': 9, 'python': 6}\n\td3 = {'erlang': 5, 'haskell': 2, 'python': 1, 'pascal': 1}\n\tprint(merge_defdict(d1, d2, d3))\n\tprint(merge_counter_v1(d1, d2, d3))\n\tprint(merge_counter_v2(d1, d2, d3))\n","sub_path":"Part 3/Section 10 - Coding Exercises/arnav_sol.py","file_name":"arnav_sol.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"389400281","text":"import os, sys\n\n\ndef write_and_send(string):\n    with open(\"RJfi.c\", \"w\") as fi:\n        fi.write('#include<stdio.h>\nint main(){{printf(\"{0}\");}}'.format(string))\n    print(\"{0}\".format(string))\n    res = os.system(\"bash submit.sh RJfi.c\")\n\n\ndef liner(lines, string=None):\n    for ans in [\"yes\", \"no\"]:\n        print(lines)\n        for n in range(20000):\n            if 0 < lines - 1:\n                if string is None:\n                    liner(lines - 1, \"{0}:{1}\\\\n\".format(ans, n))\n                else:\n                    liner(lines - 1, \"{0}{1}:{2}\\\\n\".format(string, ans, n))\n            else:\n                if string is None:\n                    write_and_send(\"{0}:{1}\\\\n\".format(ans, n))\n                else:\n                    write_and_send(\"{0}{1}:{2}\\\\n\".format(string, ans, n))\n\ndef main():\n    lines = 200\n    sys.setrecursionlimit(2000)\n    for x in range(lines):\n        liner(x)\n\nif __name__ == \"__main__\":\n    main()\n\n\n","sub_path":"murder.py","file_name":"murder.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"497783692","text":"import scrapy,datetime\nfrom ..items import DianPingHotelItem\nfrom ..UserFunction.DianPingHotelPriceAPI import GetHotelDetailInformation\nfrom pandas.tseries.offsets import Day\n\n\nclass CtripHotelScrapy(scrapy.Spider):\n    # Spider name\n    name = 'CtripHotelScrapy'\n    # Number of pages to crawl (50 max)\n    max_page = 5\n    # Starting URLs\n    start_urls= 
['http://www.dianping.com/fuzhou/hotel/p%d' % x for x in range(1,max_page)]\n\n    # Use a dedicated set of request headers\n    headers = {'Accept': '*/*',\n               'Accept-Encoding': 'gzip, deflate, br',\n               'Accept-Language': 'zh-CN,zh;q=0.9',\n               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',\n               'Connection': 'keep-alive',\n               'Referer': 'https://www.baidu.com',\n               'Host': 'www.dianping.com',\n               }\n\n    NOW_TIME = datetime.datetime.now().strftime('%Y-%m-%d')\n    NEXT_DAY_TIME = (datetime.datetime.now() + Day()).strftime('%Y-%m-%d')\n\n    def start_requests(self):\n        for url in self.start_urls:\n            yield scrapy.Request(url=url,headers=self.headers, callback=self.parse_list,dont_filter=False)\n\n    def parse_list(self, response):\n        for hotel in response.css('ul.hotelshop-list li.hotel-block'):\n            # Create the item object\n            item = DianPingHotelItem()\n            # Hotel name\n            item['name'] = hotel.css('.hotel-name-link::text').extract_first()\n            # Hotel id\n            item['id'] = hotel.css('li::attr(data-poi)').extract_first()\n            # Lowest room price (the page refreshes prices via JS after rendering, so scraping the raw page would give a wrong price)\n            item['price'] = GetHotelDetailInformation(item['id'],self.NOW_TIME ,self.NEXT_DAY_TIME)[0]['price']\n            # Hotel URL for the follow-up crawl\n            item['url'] ='http://www.dianping.com/shop/' + item['id']\n            # Crawl the hotel's detail page\n            yield scrapy.Request(item['url'], meta={'item':item }, callback=self.parse_detail)\n            # A child page is crawled next, so the direct yield of the item is commented out\n            #yield item\n\n    def parse_detail(self, response):\n        # Receive the data already crawled upstream\n        item = response.meta['item']\n        # Hotel address\n        item['place'] = response.css('span.hotel-address::text').extract_first()\n        # Rating\n        item['score'] = response.css('span.score::text').extract_first()\n        # Contact info\n        item['contact'] = response.css('.info-value::text').extract_first()\n        # Opening date\n        item['destablishment_data'] = response.xpath('//ul[@class=\"list-info\"]/li/div[@class=\"info-value\"]/text()').extract()[1]\n        print(response.xpath('//ul[@class=\"list-info\"]/li/div[@class=\"info-value\"]/text()').extract()[1])\n        # Review count; [1:-1] strips the surrounding parentheses\n        item['remark_number'] = response.css('#comment .count::text').extract_first()[1:-1]\n        # Positive-review ratio; first make sure the total is not 0\n        if int(response.css('a[data-filter] span.count::text').extract()[0][1:-1]) != 0 :\n            item['good_ratio'] = int((int(response.css('a[data-filter] span.count::text').extract()[0][1:-1]) +\n                int(response.css('a[data-filter] span.count::text').extract()[1][1:-1]) )/ int(item['remark_number'])\n                *100)\n        else:\n            item['good_ratio'] = 0\n        # Room-type list via the API (defaults to the next day's prices; not collected for now)\n        # now_time = datetime.datetime.now().strftime('%Y-%m-%d')\n        # next_day_time = (datetime.datetime.now() + Day()).strftime('%Y-%m-%d')\n        # item['room_type_list'] = GetHotelDetailInformation(item['id'],now_time,next_day_time)\n\n        yield item","sub_path":"MyScrapy/spiders/CtripHotelScrapy.py","file_name":"CtripHotelScrapy.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"172318230","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\carbon\\common\\lib\\markdown\\extensions\\toc.py\nimport markdown\nfrom markdown.util import etree\nfrom markdown.extensions.headerid import slugify, unique, itertext\nimport re\n\nclass TocTreeprocessor(markdown.treeprocessors.Treeprocessor):\n\n    def iterparent(self, root):\n        for parent in root.getiterator():\n            for child in parent:\n                yield (parent, child)\n\n    def run(self, doc):\n        marker_found = False\n        div = etree.Element('div')\n        div.attrib['class'] = 'toc'\n        
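# (Annotation) The toc.py Treeprocessor below assembles the table of contents from ElementTree
# primitives. A tiny standalone sketch of the same ul/li nesting pattern, using the stdlib
# etree rather than markdown.util.etree:

from xml.etree.ElementTree import Element, SubElement, tostring

toc_div = Element('div')
toc_div.attrib['class'] = 'toc'
ul = SubElement(toc_div, 'ul')
li = SubElement(ul, 'li')
link = SubElement(li, 'a')
link.text = 'Section 1'
link.attrib['href'] = '#section-1'
print(tostring(toc_div).decode())
# <div class="toc"><ul><li><a href="#section-1">Section 1</a></li></ul></div>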
last_li = None\n if self.config['title']:\n header = etree.SubElement(div, 'span')\n header.attrib['class'] = 'toctitle'\n header.text = self.config['title']\n level = 0\n list_stack = [div]\n header_rgx = re.compile('[Hh][123456]')\n used_ids = []\n for c in doc.getiterator():\n if 'id' in c.attrib:\n used_ids.append(c.attrib['id'])\n\n for p, c in self.iterparent(doc):\n text = ''.join(itertext(c)).strip()\n if not text:\n continue\n if c.text and c.text.strip() == self.config['marker'] and not header_rgx.match(c.tag) and c.tag not in ('pre',\n 'code'):\n for i in range(len(p)):\n if p[i] == c:\n p[i] = div\n break\n\n marker_found = True\n if header_rgx.match(c.tag):\n try:\n tag_level = int(c.tag[-1])\n while tag_level < level:\n list_stack.pop()\n level -= 1\n\n if tag_level > level:\n newlist = etree.Element('ul')\n if last_li:\n last_li.append(newlist)\n else:\n list_stack[-1].append(newlist)\n list_stack.append(newlist)\n if level == 0:\n level = tag_level\n else:\n level += 1\n if 'id' not in c.attrib:\n id = unique(self.config['slugify'](text, '-'), used_ids)\n c.attrib['id'] = id\n else:\n id = c.attrib['id']\n last_li = etree.Element('li')\n link = etree.SubElement(last_li, 'a')\n link.text = text\n link.attrib['href'] = '#' + id\n if self.config['anchorlink'] in [1, '1', True, 'True', 'true']:\n anchor = etree.Element('a')\n anchor.text = c.text\n anchor.attrib['href'] = '#' + id\n anchor.attrib['class'] = 'toclink'\n c.text = ''\n for elem in c._children:\n anchor.append(elem)\n c.remove(elem)\n\n c.append(anchor)\n list_stack[-1].append(last_li)\n except IndexError:\n pass\n\n if not marker_found:\n prettify = self.markdown.treeprocessors.get('prettify')\n if prettify:\n prettify.run(div)\n toc = self.markdown.serializer(div)\n for pp in self.markdown.postprocessors.values():\n toc = pp.run(toc)\n\n self.markdown.toc = toc\n return\n\n\nclass TocExtension(markdown.Extension):\n\n def __init__(self, configs):\n self.config = {\n 'marker': ['[TOC]', 'Text to find and replace with Table of Contents -Defaults to \"[TOC]\"'],'slugify': [slugify, \"Function to generate anchors based on header text-Defaults to the headerid ext's slugify function.\"],'title': [None, 'Title to insert into TOC
- Defaults to None'],'anchorlink': [0, '1 if header should be a self link - Defaults to 0']}\n        for key, value in configs:\n            self.setConfig(key, value)\n\n        return\n\n    def extendMarkdown(self, md, md_globals):\n        tocext = TocTreeprocessor(md)\n        tocext.config = self.getConfigs()\n        md.treeprocessors.add('toc', tocext, '<prettify')\n\nif args.cuda >= 0:\n    DEVICE = torch.device(\"cuda:%d\" % args.cuda)\nelse:\n    DEVICE = torch.device(\"cpu\")\n\nif args.seed == -1:\n    RANDOM_SEED = None\nelse:\n    RANDOM_SEED = args.seed\n\nSTORE_PATH = args.storepath\nif not os.path.exists(STORE_PATH):\n    os.mkdir(STORE_PATH)\nLOGFILE = os.path.join(STORE_PATH, 'training.log')\nTEST_PREDICTIONS = os.path.join(STORE_PATH, 'test_predictions.log')\nTEST_ALLPROBAS = os.path.join(STORE_PATH, 'test_allprobas.tensor')\n\nIMP_WEIGHT = args.imp_weight\nnum_epochs = args.epoch\nBATCH_SIZE = args.batch\nAGE_NUM = 5#21\n\n######################\n# 02.Logging\n#######################\nheader = []\nheader.append('PyTorch Version: %s' % torch.__version__)\nheader.append('CUDA device available: %s' % torch.cuda.is_available())\nheader.append('Using CUDA device: %s' % DEVICE)\nheader.append('Random Seed: %s' % RANDOM_SEED)\nheader.append('Output Path: %s' % STORE_PATH)\nheader.append('Task Importance Weight: %s' % IMP_WEIGHT)\n\nwith open(LOGFILE, 'w') as f:\n    for entry in header:\n        print(entry)\n        f.write('%s\\n' % entry)\n        f.flush()\n\n\n##########################\n# 03.SETTINGS\n##########################\nNUM_WORKERS = 0 \nlearning_rate = 0.0005\n#num_epochs = 1#50#200\n#AGE_NUM = 58#21\n#BATCH_SIZE = 80#64\nGRAYSCALE = False\n\ndf = pd.read_csv(TRAIN_CSV_PATH, index_col=0)\nages = df['age_id'].values\ndel df\nages = torch.tensor(ages, dtype=torch.float)\n\ndef task_importance_weights(label_array):\n    uniq = torch.unique(label_array)\n    num_examples = label_array.size(0)\n    m = torch.zeros(uniq.shape[0])\n    for i, t in enumerate(torch.arange(torch.min(uniq), torch.max(uniq))):\n        m_k = torch.max(torch.tensor([label_array[label_array > t].size(0), \n                                      num_examples - label_array[label_array > t].size(0)]))\n        m[i] = torch.sqrt(m_k.float())\n\n    imp = m/torch.max(m)\n    return imp\n\n# Data-specific scheme\nif not IMP_WEIGHT:\n    imp = torch.ones(AGE_NUM-1, dtype=torch.float)\nelif IMP_WEIGHT == 1:\n    imp = task_importance_weights(ages)\n    imp = imp[0:AGE_NUM-1]\nelse:\n    raise ValueError('Incorrect importance weight parameter.')\nimp = imp.to(DEVICE)\n\n\n###################\n# 04.DataLoad\n###################\nclass AFADDatasetAge(Dataset):\n    \"\"\"Custom Dataset for loading AFAD face images\"\"\"\n\n    def __init__(self, csv_path, img_dir, transform=None):\n\n        df = pd.read_csv(csv_path, index_col=0)\n        self.img_dir = img_dir\n        self.csv_path = csv_path\n        self.img_paths = df['relpath']\n        self.y = df['age_id'].values\n        self.transform = transform\n\n    def __getitem__(self, index):\n        img = Image.open(os.path.join(self.img_dir,\n                                      self.img_paths[index]))\n\n        if self.transform is not None:\n            img = self.transform(img)\n\n        label = self.y[index]\n        levels = [1]*label + [0]*(AGE_NUM - 1 - label)\n        levels = torch.tensor(levels, dtype=torch.float32)\n\n        return img, label, levels\n\n    def __len__(self):\n        return self.y.shape[0]\n\n\ncustom_transform = transforms.Compose([transforms.Resize((128, 128)),\n                                       transforms.RandomCrop((120, 120)),\n                                       transforms.ToTensor()])\n\ntrain_dataset = AFADDatasetAge(csv_path=TRAIN_CSV_PATH,\n                               img_dir=IMAGE_ROOT,\n                               transform=custom_transform)\n\ncustom_transform2 = transforms.Compose([transforms.Resize((128, 128)),\n                                        transforms.CenterCrop((120, 120)),\n                                        
transforms.ToTensor()])\n\ntest_dataset = AFADDatasetAge(csv_path=TEST_CSV_PATH,\n img_dir=IMAGE_ROOT,\n transform=custom_transform2)\n\nvalid_dataset = AFADDatasetAge(csv_path=VALID_CSV_PATH,\n img_dir=IMAGE_ROOT,\n transform=custom_transform2)\n\ntrain_loader = DataLoader(dataset=train_dataset,\n batch_size=BATCH_SIZE,\n shuffle=True,\n num_workers=NUM_WORKERS)\n\ntest_loader = DataLoader(dataset=test_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=NUM_WORKERS)\n\nvalid_loader = DataLoader(dataset=valid_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=NUM_WORKERS)\n\n\n###########################################\n# 05.Initialize Cost, Model, and Optimizer\n###########################################\n\ndef cost_fn(logits, levels, imp):\n val = (-torch.sum((F.logsigmoid(logits)*levels\n + (F.logsigmoid(logits) - logits)*(1-levels))*imp,dim=1))\n return torch.mean(val)\n\n\ntorch.manual_seed(RANDOM_SEED)\ntorch.cuda.manual_seed(RANDOM_SEED)\n###05.resnet###\nmodel = resnet34_(AGE_NUM, GRAYSCALE)\nmodel.to(DEVICE)\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate) \n\n########\n#06.training\n#########\ndef compute_mae_and_mse(model, data_loader, device):\n mae, mse, num_examples = 0, 0, 0\n for i, (features, targets, levels) in enumerate(data_loader):\n\n features = features.to(device)\n targets = targets.to(device)\n\n logits, probas = model(features)\n predict_levels = probas > 0.5\n predicted_labels = torch.sum(predict_levels, dim=1)\n num_examples += targets.size(0)\n mae += torch.sum(torch.abs(predicted_labels - targets))\n mse += torch.sum((predicted_labels - targets)**2)\n mae = mae.float() / num_examples\n mse = mse.float() / num_examples\n return mae, mse\n\nstart_time = time.time()\nbest_mae, best_rmse, best_epoch = 999, 999, -1\nfor epoch in range(num_epochs):\n\n model.train()\n for batch_idx, (features, targets, levels) in enumerate(train_loader):\n #if not batch_idx % 50:\n # print('debug pred:',targets, levels)\n features = features.to(DEVICE)\n targets = targets\n targets = targets.to(DEVICE)\n levels = levels.to(DEVICE)\n\n # FORWARD AND BACK PROP\n logits, probas = model(features)\n #if not batch_idx % 50:\n #print('debug output:',logits,probas)\n cost = cost_fn(logits, levels, imp)\n optimizer.zero_grad()\n\n cost.backward()\n\n # UPDATE MODEL PARAMETERS\n optimizer.step()\n\n # LOGGING\n if not batch_idx % 50:\n s = ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'\n % (epoch+1, num_epochs, batch_idx,len(train_dataset)//BATCH_SIZE, cost))\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n \n ########### Valid ##############\n model.eval()\n with torch.set_grad_enabled(False):\n valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,device=DEVICE)\n if valid_mae < best_mae:\n best_mae, best_rmse, best_epoch = valid_mae, torch.sqrt(valid_mse), epoch\n ########## SAVE MODEL #############\n torch.save(model.state_dict(), os.path.join(STORE_PATH, MODEL_PT_NAME))\n \n s = 'MAE/RMSE: | Current Valid: %.2f/%.2f Ep. %d | Best Valid : %.2f/%.2f Ep. 
%d' % (\n valid_mae, torch.sqrt(valid_mse), epoch, best_mae, best_rmse, best_epoch)\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n ################################\n\n s = 'Time elapsed: %.2f min' % ((time.time() - start_time)/60)\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\n\n#####################\n# 07.testing\n######################\nmodel.eval()\nwith torch.set_grad_enabled(False): # save memory during inference\n\n train_mae, train_mse = compute_mae_and_mse(model, train_loader,device=DEVICE)\n valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,device=DEVICE)\n test_mae, test_mse = compute_mae_and_mse(model, test_loader,device=DEVICE)\n\n s = 'MAE/RMSE: | Train: %.2f/%.2f | Valid: %.2f/%.2f | Test: %.2f/%.2f' % (\n train_mae, torch.sqrt(train_mse),\n valid_mae, torch.sqrt(valid_mse),\n test_mae, torch.sqrt(test_mse))\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\ns = 'Total Training Time: %.2f min' % ((time.time() - start_time)/60)\nprint(s)\nwith open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\n\n###################\n# 08.save model\n###################\n########## EVALUATE BEST MODEL ######\nmodel.load_state_dict(torch.load(os.path.join(STORE_PATH, MODEL_PT_NAME)))\nmodel.eval()\nwith torch.set_grad_enabled(False):\n train_mae, train_mse = compute_mae_and_mse(model, train_loader,device=DEVICE)\n valid_mae, valid_mse = compute_mae_and_mse(model, valid_loader,device=DEVICE)\n test_mae, test_mse = compute_mae_and_mse(model, test_loader,device=DEVICE)\n\n s = 'MAE/RMSE: | Best Train: %.2f/%.2f | Best Valid: %.2f/%.2f | Best Test: %.2f/%.2f' % (\n train_mae, torch.sqrt(train_mse),\n valid_mae, torch.sqrt(valid_mse),\n test_mae, torch.sqrt(test_mse))\n print(s)\n with open(LOGFILE, 'a') as f:\n f.write('%s\\n' % s)\n\n########## SAVE PREDICTIONS ######\nall_pred = []\nall_probas = []\nwith torch.set_grad_enabled(False):\n for batch_idx, (features, targets, levels) in enumerate(test_loader):\n \n features = features.to(DEVICE)\n logits, probas = model(features)\n all_probas.append(probas)\n predict_levels = probas > 0.5\n predicted_labels = torch.sum(predict_levels, dim=1)\n lst = [str(int(i)) for i in predicted_labels]\n all_pred.extend(lst)\n\n\ntorch.save(torch.cat(all_probas).to(torch.device('cpu')), TEST_ALLPROBAS)\nwith open(TEST_PREDICTIONS, 'w') as f:\n all_pred = ','.join(all_pred)\n f.write(all_pred)\n","sub_path":"training/age/age_identity.py","file_name":"age_identity.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"164531510","text":"import pickle\nfrom common import movies\nfrom django.conf import settings\nimport random\nimport os\nimport re\n\nclass slot():\n slot_place = 0\n\n @classmethod\n def minus(cls):\n if not cls.slot_place <= 0:\n cls.slot_place -= 1\n\n @classmethod\n def plus(cls):\n if not cls.slot_place >= 2:\n cls.slot_place += 1\n\n @classmethod\n def reset(cls):\n cls.slot_place = 0\n\nclass Selector():\n slot_place = 0\n\n @classmethod\n def minus(cls, game_data):\n i = game_data['moviedex'].index(cls.slot_place)\n if i > 0:\n cls.slot_place = game_data['moviedex'][i - 1]\n\n @classmethod\n def plus(cls, game_data):\n i = game_data['moviedex'].index(cls.slot_place)\n if i < len(game_data['moviedex']) - 1:\n cls.slot_place = game_data['moviedex'][i + 1]\n\n @classmethod\n def reset(cls, game_data):\n cls.slot_place = game_data['moviedex'][0] \n\nclass data_game():\n data = {}\n 
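# (Annotation) The age_identity.py record that ends above trains with a CORAL-style ordinal
# encoding: each label becomes AGE_NUM-1 cumulative binary targets, and a prediction is read
# back by counting sigmoid outputs above 0.5. A minimal round trip with toy numbers:

import torch

AGE_NUM = 5
label = 3
levels = torch.tensor([1] * label + [0] * (AGE_NUM - 1 - label), dtype=torch.float32)
probas = torch.tensor([0.9, 0.8, 0.6, 0.2])     # hypothetical per-threshold model outputs
predicted_label = int(torch.sum(probas > 0.5))  # recovers 3
print(levels.tolist(), predicted_label)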
list_movie_nc = []\n    capturable = ''\n\n    def load_game(self, save_file):\n        gd_file = open(settings.SAVE_FILES + '/' + save_file, 'rb')\n        data = pickle.load(gd_file)\n        gd_file.close()\n        self.load(data)\n\n    def save_game(self, slot):\n        c = re.compile(\"^slot([{}])_([0-9]*)_([0-9]*)\\.mmg$\".format(list(['a','b','c'])[slot]))\n        for s in os.listdir(settings.SAVE_FILES):\n            match = c.match(s)\n            if match and int(match.group(2)) < int(match.group(3)) and int(match.group(3)) == len(settings.MOVIES):\n                os.remove(\"{}/{}\".format(settings.SAVE_FILES,match.group()))\n        pathfile = \"{}/slot{}_{}_{}.mmg\".format(settings.SAVE_FILES,list(['a','b','c'])[slot],len(self.data['moviedex']),len(self.data['list_moviemon']))\n        gd_file = open(pathfile, 'wb')\n        pickle.dump(self.data,gd_file)\n        gd_file.close()\n\n    def save_state(self):\n        gd_file = open('gamedata', 'wb')\n        pickle.dump(self.data,gd_file)\n        gd_file.close()\n    \n    def load_state(self):\n        gd_file = open('gamedata', 'rb')\n        self.data = pickle.load(gd_file)\n        gd_file.close()\n    \n    def load(self, data):\n        self.data = data\n        self.list_movie_nc = self.get_list_movi_nc()\n        self.save_state()\n        return self\n    \n    def dump(self):\n        return(self.data)\n    \n    def load_default_settings(self):\n        self.data = {\n            \"position\": [settings.POS_X, settings.POS_Y],\n            \"nbr_movieball\":settings.NBR_MOVIEBALL,\n            \"moviedex\":[],\n            \"list_moviemon\" : movies.Movies_info().get_list(),\n        }\n        self.list_movie_nc = settings.MOVIES\n        return self \n    \n    def get_list_movi_nc(self):\n        movie_nc = []\n        for movie in self.data['list_moviemon']:\n            if not movie['imdbID'] in self.data['moviedex']:\n                movie_nc.append(movie['imdbID'])\n        return(movie_nc)\n    \n    def get_random_movie(self):\n        list_moviemon_nc = self.get_list_movi_nc()\n        return list_moviemon_nc[random.randint(0,len(list_moviemon_nc) - 1)]\n    \n    def get_strength(self):\n        return(len(self.data['moviedex']))\n    \n    def get_movie(self, moviemon_id):\n        for movie in self.data[\"list_moviemon\"]:\n            if movie['imdbID'] == moviemon_id:\n                detail_movie = {\n                    \"name\" : movie['Title'],\n                    \"poster\" : movie['Poster'],\n                    \"real\" : movie[\"Director\"],\n                    \"year\" : movie[\"Year\"],\n                    \"rating\" : movie['imdbRating'],\n                    \"synopsis\" : movie[\"Plot\"],\n                    \"actors\" : movie[\"Actors\"],\n                }\n                return detail_movie\n        return None\n    \n    def checkpos(self):\n        if self.data['position'][0] < 0 or self.data['position'][0] >= settings.GRID_SIZE:\n            self.load_state()\n        elif self.data['position'][1] < 0 or self.data['position'][1] >= settings.GRID_SIZE:\n            self.load_state()\n        else:\n            return 1\n        return 0\n    \n    @classmethod\n    def define_capturable(cls, movie_id):\n        cls.capturable = movie_id\n\n    def transform_events(self, movieball_event, moviemon_id):\n        events = ['','', '#']\n        if movieball_event == \"True\":\n            events[0] = 'You just found a ball!'\n            self.data['nbr_movieball'] += 1\n        if moviemon_id != \"\":\n            movie_name = self.get_movie(moviemon_id)['name']\n            events[1] = \"You encountered \" + movie_name + \", Press A to capture it!\"\n            events[2] = 'http://127.0.0.1:8000/battle/' + moviemon_id + '/'\n            self.define_capturable(moviemon_id)\n        return events\n\n    def try_random_events(self):\n        events = ['', '#']\n        if len(self.data['moviedex']) != len(self.data['list_moviemon']):\n            if random.randint(1, 100) <= settings.FIND_BALL_PROBA_PERCENT:\n                events[0] = True\n            if random.randint(1, 100) <= settings.FIND_MOVIEMON_PROBA_PERCENT:\n                movie_id = self.get_random_movie()\n                events[1] = movie_id\n        return events\n\n\nif __name__ == \"__main__\":\n    d = data_game()\n    d.load(0,0,5,[\"film1\", \"film2\"])\n    
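# (Annotation) save_game above encodes progress in the save-file name itself: slot letter,
# captured count, and total count, matched later by the slot([abc])_..._... .mmg regex.
# A small sketch of the naming scheme plus the pickle round trip, using a made-up save dict:

import pickle, re

data = {"moviedex": ["tt001"], "list_moviemon": [{"imdbID": "tt001"}, {"imdbID": "tt002"}]}
path = "slot{}_{}_{}.mmg".format("a", len(data["moviedex"]), len(data["list_moviemon"]))
with open(path, "wb") as f:
    pickle.dump(data, f)
with open(path, "rb") as f:
    assert pickle.load(f) == data
print(re.match(r"^slot([abc])_([0-9]*)_([0-9]*)\.mmg$", path).groups())  # ('a', '1', '2')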
","sub_path":"rush00/common/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"403132594","text":"# _*_ coding:utf-8 _*_\nimport tensorflow as tf\nfrom GAN_test_discriminator import Discriminator\nfrom GAN_test_feature_discriminator import FeatureDiscriminator\nfrom GAN_test_encoder import GEncoder\nfrom GAN_test_decoder import GDecoder\nfrom encoder import Encoder\nfrom decoder import Decoder\nimport numpy as np\n\n\nclass GAN:\n def __init__(self,\n image_size,\n learning_rate=2e-5,\n batch_size=1,\n ngf=64,\n units=4096\n ):\n \"\"\"\n Args:\n input_size:list [H, W, C]\n batch_size: integer, batch size\n learning_rate: float, initial learning rate for Adam\n ngf: number of gen filters in first conv layer\n \"\"\"\n self.learning_rate = learning_rate\n self.input_shape = [int(batch_size / 4), image_size[0], image_size[1], image_size[2]]\n self.ones = tf.ones(self.input_shape, name=\"ones\")\n self.tenaor_name = {}\n\n self.EC_F = GEncoder('EC_F', ngf=ngf, units=units, keep_prob=0.85)\n self.DC_F = GDecoder('DC_F', ngf=ngf, output_channl=2, units=units)\n\n self.EC_M = Encoder('EC_M', ngf=ngf / 2, keep_prob=0.9)\n self.DC_M = Decoder('DC_M', ngf=ngf / 2, output_channl=2)\n\n self.D_F = Discriminator('D_F', ngf=ngf, keep_prob=0.85)\n self.FD_F = FeatureDiscriminator('FD_F', ngf=ngf)\n\n def gauss_2d_kernel(self, kernel_size=3, sigma=0.0):\n kernel = np.zeros([kernel_size, kernel_size])\n center = (kernel_size - 1) / 2\n if sigma == 0:\n sigma = ((kernel_size - 1) * 0.5 - 1) * 0.3 + 0.8\n s = 2 * (sigma ** 2)\n sum_val = 0\n for i in range(0, kernel_size):\n for j in range(0, kernel_size):\n x = i - center\n y = j - center\n kernel[i, j] = np.exp(-(x ** 2 + y ** 2) / s)\n sum_val += kernel[i, j]\n sum_val = 1 / sum_val\n return kernel * sum_val\n\n def gaussian_blur_op(self, image, kernel, kernel_size, cdim=3):\n # kernel as placeholder variable, so it can change\n outputs = []\n pad_w = (kernel_size * kernel_size - 1) // 2\n padded = tf.pad(image, [[0, 0], [pad_w, pad_w], [pad_w, pad_w], [0, 0]], mode='REFLECT')\n for channel_idx in range(cdim):\n data_c = padded[:, :, :, channel_idx:(channel_idx + 1)]\n g = tf.reshape(kernel, [1, -1, 1, 1])\n data_c = tf.nn.conv2d(data_c, g, [1, 1, 1, 1], 'VALID')\n g = tf.reshape(kernel, [-1, 1, 1, 1])\n data_c = tf.nn.conv2d(data_c, g, [1, 1, 1, 1], 'VALID')\n outputs.append(data_c)\n return tf.concat(outputs, axis=3)\n\n def gaussian_blur(self, x, sigma=0.5, alpha=0.15):\n gauss_filter = self.gauss_2d_kernel(3, sigma)\n gauss_filter = gauss_filter.astype(dtype=np.float32)\n y = self.gaussian_blur_op(x, gauss_filter, 3, cdim=1)\n y = tf.ones(y.get_shape().as_list()) * tf.cast(y > alpha, dtype=\"float32\")\n return y\n\n def get_mask(self, mask, p=5):\n shape = mask.get_shape().as_list()\n mask = tf.image.resize_images(mask, size=[shape[1] + p, shape[2] + p], method=1)\n mask = tf.image.resize_image_with_crop_or_pad(mask, shape[1], shape[2])\n return mask\n\n def model(self, f, mask):\n # F -> F_R VAE\n # f = self.gaussian_blur(f, sigma=0.7, alpha=0.3)\n f_one_hot = tf.reshape(tf.one_hot(tf.cast(f, dtype=tf.int32), depth=2, axis=-1),\n shape=[self.input_shape[0], self.input_shape[1], self.input_shape[2],\n 2 * self.input_shape[3]])\n m_one_hot = tf.reshape(tf.one_hot(tf.cast(mask, dtype=tf.int32), depth=2, axis=-1),\n shape=[self.input_shape[0], self.input_shape[1], self.input_shape[2],\n 2 * self.input_shape[3]])\n\n code_f_mean, 
code_f_logvar = self.EC_F(f / 10.0 + 0.9)\n        shape = code_f_logvar.get_shape().as_list()\n        code_f_std = tf.exp(0.5 * code_f_logvar)\n        code_f_epsilon = tf.random_normal(shape, mean=0., stddev=1., dtype=tf.float32)\n        code_f = code_f_mean + code_f_std * code_f_epsilon\n        f_r_prob = self.DC_F(code_f)\n\n        # CODE_F_RM\n        code_f_rm = tf.random_normal(shape, mean=0., stddev=1., dtype=tf.float32)\n        f_rm_prob = self.DC_F(code_f_rm)\n\n        # D,FD\n        j_f = self.D_F(f_one_hot)\n        j_f_rm = self.D_F(f_rm_prob)\n\n        code_f = tf.reshape(code_f, shape=[self.input_shape[0], 64, 64, -1])\n        code_f_rm = tf.reshape(code_f_rm, shape=[self.input_shape[0], 64, 64, -1])\n        j_code_f_rm = self.FD_F(code_f_rm)\n        j_code_f = self.FD_F(code_f)\n\n        mask_r_prob = self.DC_M(self.EC_M(f_one_hot))\n        mask_rm_prob = self.DC_M(self.EC_M(f_rm_prob))\n\n        D_loss = 0.0\n        FG_loss = 0.0\n        MG_loss = 0.0\n        # Adversarial loss that pushes the structure-map code toward a standard normal distribution\n        D_loss += self.mse_loss(j_code_f_rm, 1.0) * 10\n        D_loss += self.mse_loss(j_code_f, 0.0) * 10\n        FG_loss += self.mse_loss(j_code_f, 1.0) * 0.001\n\n        FG_loss += self.mse_loss(tf.reduce_mean(code_f_mean), 0.0) * 0.001\n        FG_loss += self.mse_loss(tf.reduce_mean(code_f_std), 1.0) * 0.001\n\n        # Adversarial loss that makes structure maps decoded from random normal codes more realistic\n        D_loss += self.mse_loss(j_f, 1.0) * 25\n        D_loss += self.mse_loss(j_f_rm, 0.0) * 25\n        FG_loss += self.mse_loss(j_f_rm, 1.0) * 0.1\n\n        # Self-supervised consistency loss between the reconstructed structure map and the original\n        FG_loss += self.mse_loss(f_one_hot, f_r_prob) * 25\n\n        FG_loss += tf.reduce_mean(tf.abs(f_one_hot - f_r_prob)) * 10\n        FG_loss += (tf.reduce_mean(f_r_prob[:, :, :, 0]) - tf.reduce_mean(f_r_prob[:, :, :, 1])) * 0.0001\n        FG_loss += (tf.reduce_mean(f_rm_prob[:, :, :, 0]) - tf.reduce_mean(f_rm_prob[:, :, :, 1])) * 0.0001\n\n        FG_loss += self.mse_loss(0.0, m_one_hot * f_r_prob) * 1\n        FG_loss += self.mse_loss(0.0, mask_rm_prob * f_rm_prob) * 1\n        FG_loss += self.mse_loss(m_one_hot, mask_rm_prob) * 10\n\n        MG_loss += self.mse_loss(m_one_hot, mask_r_prob) * 15\n\n        new_f = tf.reshape(tf.cast(tf.argmax(f_one_hot, axis=-1), dtype=tf.float32), shape=self.input_shape)\n        f_r = tf.reshape(tf.cast(tf.argmax(f_r_prob, axis=-1), dtype=tf.float32), shape=self.input_shape)\n        f_rm = tf.reshape(tf.cast(tf.argmax(f_rm_prob, axis=-1), dtype=tf.float32), shape=self.input_shape)\n        mask_r = tf.reshape(tf.cast(tf.argmax(mask_r_prob, axis=-1), dtype=tf.float32), shape=self.input_shape)\n        mask_rm = tf.reshape(tf.cast(tf.argmax(mask_rm_prob, axis=-1), dtype=tf.float32), shape=self.input_shape)\n\n        self.tenaor_name[\"code_f_rm\"] = str(code_f_rm)\n        self.tenaor_name[\"f_rm\"] = str(f_rm)\n        self.tenaor_name[\"j_f_rm\"] = str(j_f_rm)\n\n        image_list = [new_f, f_r, f_rm, mask, mask_r, mask_rm]\n        code_list = [code_f, code_f_rm]\n        j_list = [j_code_f, j_code_f_rm, j_f, j_f_rm]\n        loss_list = [FG_loss + MG_loss, D_loss]\n\n        return image_list, code_list, j_list, loss_list\n\n    def get_variables(self):\n        return [self.EC_F.variables\n                + self.DC_F.variables\n                + self.EC_M.variables\n                + self.DC_M.variables\n                ,\n                self.D_F.variables +\n                self.FD_F.variables\n                ]\n\n    def optimize(self):\n        def make_optimizer(name='Adam'):\n            learning_step = (\n                tf.train.AdamOptimizer(self.learning_rate, beta1=0.5, name=name)\n            )\n            return learning_step\n\n        FG_optimizer = make_optimizer(name='Adam_FG')\n        MG_optimizer = make_optimizer(name='Adam_MG')\n        D_optimizer = make_optimizer(name='Adam_D')\n\n        return FG_optimizer, MG_optimizer, D_optimizer\n\n    def evaluation_code(self, code_list):\n        code_f, code_f_rm = \\\n            code_list[0], code_list[1]\n        list = [self.PSNR(code_f, code_f_rm)]\n        return list\n\n    def 
evaluation_code_summary(self, evaluation_list):\n        tf.summary.scalar('evaluation_code/PSNR/code_f__VS__code_f_rm', evaluation_list[0])\n\n    def evaluation(self, image_list):\n        f, f_r, f_rm, mask, mask_r, mask_rm = image_list[0], image_list[1], image_list[2], image_list[3], image_list[4], \\\n                                              image_list[5]\n        metrics = [self.PSNR(f, f_r),\n                   self.SSIM(f, f_r),\n                   self.PSNR(mask, mask_r),\n                   self.SSIM(mask, mask_r)]\n        return metrics\n\n    def evaluation_summary(self, evaluation_list):\n        tf.summary.scalar('evaluation/PSNR/f__VS__f_r', evaluation_list[0])\n        tf.summary.scalar('evaluation/SSIM/f__VS__f_r', evaluation_list[1])\n        tf.summary.scalar('evaluation/PSNR/mask__VS__mask_r', evaluation_list[2])\n        tf.summary.scalar('evaluation/SSIM/mask__VS__mask_r', evaluation_list[3])\n\n    def histogram_summary(self, j_list):\n        j_code_f, j_code_f_rm, j_f, j_f_rm = j_list[0], j_list[1], j_list[2], j_list[3]\n        tf.summary.histogram('discriminator/TRUE/j_code_f_rm', j_code_f_rm)\n        tf.summary.histogram('discriminator/FALSE/j_code_f', j_code_f)\n        tf.summary.histogram('discriminator/TRUE/j_f', j_f)\n        tf.summary.histogram('discriminator/FALSE/j_f_rm', j_f_rm)\n\n    def loss_summary(self, loss_list):\n        FG_loss, D_loss = loss_list[0], loss_list[1]\n        tf.summary.scalar('loss/FG_loss', FG_loss)\n        # tf.summary.scalar('loss/MG_loss', MG_loss)\n        tf.summary.scalar('loss/D_loss', D_loss)\n\n    def image_summary(self, image_list):\n        f, f_r, f_rm, mask, mask_r, mask_rm = image_list[0], image_list[1], image_list[2], image_list[3], image_list[4], \\\n                                              image_list[5]\n        tf.summary.image('image/f', f)\n        tf.summary.image('image/f_rm', f_rm)\n        tf.summary.image('image/f_r', f_r)\n        # tf.summary.image('image/f_one_hot1', f_one_hot[:,:,:,0:1])\n        # tf.summary.image('image/f_one_hot2', f_one_hot[:,:,:,1:2])\n        # tf.summary.image('image/f_r_prob1', f_r_prob[:,:,:,0:1])\n        # tf.summary.image('image/f_r_prob2', f_r_prob[:,:,:,1:2])\n        tf.summary.image('image/mask', mask)\n        tf.summary.image('image/mask_rm', mask_rm)\n        tf.summary.image('image/mask_r', mask_r)\n\n    def mse_loss(self, x, y):\n        \"\"\" supervised loss (L2 norm)\n        \"\"\"\n        loss = tf.reduce_mean(tf.square(x - y))\n        return loss\n\n    def ssim_loss(self, x, y):\n        \"\"\" structural-similarity (SSIM) loss, scaled by 20\n        \"\"\"\n        loss = (1.0 - self.SSIM(x, y)) * 20\n        return loss\n\n    def PSNR(self, output, target):\n        psnr = tf.reduce_mean(tf.image.psnr(output, target, max_val=1.0, name=\"psnr\"))\n        return psnr\n\n    def SSIM(self, output, target):\n        ssim = tf.reduce_mean(tf.image.ssim(output, target, max_val=1.0))\n        return ssim\n\n    def norm(self, input):\n        output = (input - tf.reduce_min(input, axis=[1, 2, 3])\n                  ) / (tf.reduce_max(input, axis=[1, 2, 3]) - tf.reduce_min(input, axis=[1, 2, 3]))\n        return output\n","sub_path":"src_code/SWM_GET_F/GAN_test_model_mask_v3.py","file_name":"GAN_test_model_mask_v3.py","file_ext":"py","file_size_in_byte":11232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"76725648","text":"import tkinter\nfrom tkinter import BOTH, StringVar,END\n\n# Frame Colours\nRootColour = \"#D4F5F5\"\nInputColour = \"#554348\"\nOutputColour = \"#8C9A9E\"\n\n# Root Window\nroot = tkinter.Tk()\nroot.title(string=\"Welcome to GUI\")\nroot.iconbitmap(\"Codes/Hello GUI World/Images/InfoIcon.ico\")\nroot.geometry(\"300x400\")\nroot.resizable(0, 0)\nroot.config(bg=RootColour)\n\n# Functions\ndef SubmitName():\n    if CaseStyle.get() == \"LowerCase\":\n        NameLabel = tkinter.Label(OutputFrame, text=\"Hello \"+Name.get()+\", Keep Learning Tkinter!!\", bg=OutputColour)\n    elif 
CaseStyle.get() == \"UpperCase\":\n        NameLabel = tkinter.Label(OutputFrame, text=(\"Hello \"+Name.get()+\", Keep Learning Tkinter!!\").upper(), bg=OutputColour)\n    NameLabel.pack()\n    Name.delete(0,END)\n\n\n# Output and Input Frames\nInputFrame = tkinter.Frame(root, bg=InputColour, width=300, height=20)\nInputFrame.pack(padx=10, pady=10, fill=BOTH)\nInputFrame.pack_propagate(0)\nOutputFrame = tkinter.Frame(root, bg=OutputColour, width=300, height=380)\nOutputFrame.pack(padx=10, pady=(0, 10), expand=True, fill=BOTH)\n\n# Widgets and Buttons\nName = tkinter.Entry(InputFrame, width=25)  # tkinter.Entry has no \"text\" option; passing one raises a TclError\nName.grid(row=0, column=0, padx=10, pady=10)\nSubmit = tkinter.Button(InputFrame, text=\"Submit\", command=SubmitName)\nSubmit.grid(row=0, column=1, padx=10, pady=10, ipadx=20)\n\n# Radio Buttons\nCaseStyle = StringVar()\nCaseStyle.set('LowerCase')\nLowerCaseButton = tkinter.Radiobutton(\n    InputFrame, text=\"Lower Case\", variable=CaseStyle, value=\"LowerCase\", bg=InputColour)\nUpperCaseButton = tkinter.Radiobutton(\n    InputFrame, text=\"Upper Case\", variable=CaseStyle, value=\"UpperCase\", bg=InputColour)\nLowerCaseButton.grid(row=1, column=0)\nUpperCaseButton.grid(row=1, column=1)\n\nroot.mainloop()\n","sub_path":"Hello GUI World/Hello GUI World.py","file_name":"Hello GUI World.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"602155144","text":"import os\n\nimport numpy as np\nimport torch.utils.data as data\n\nfrom ventricle_segmentation.core import AnnotatedScan\nfrom ventricle_segmentation.utils import pickle_load, print_info\n\n\nclass ScansDataset(data.Dataset):\n    \"\"\"\n    Loader that loads dataset. Dataset is stored in dataset_dir.\n    Each example is pickled AnnotatedScan\n    See scripts/prepare_data.py to check how to create dataset\n    \"\"\"\n\n    def __init__(self, dataset_dir):\n        \"\"\"\n        :param str dataset_dir: Path to folder with pickle files\n        \"\"\"\n        self.dataset_dir = dataset_dir\n\n        self.scan_pkl_files = [file for file in os.listdir(self.dataset_dir) if file.endswith(\".pkl\")]\n\n    def __getitem__(self, index):\n        scan_path = os.path.join(self.dataset_dir, self.scan_pkl_files[index])\n        annotated_scan = pickle_load(scan_path)  # type: AnnotatedScan\n\n        dicom_img = (annotated_scan.dicom_img.astype(np.float32) - 500) / 1000\n        dicom_img = np.expand_dims(dicom_img, 0)  # adding channel at dimension 0, required dims in batch (N,C_in,H_in,W_in); N is not present yet\n\n        imask = annotated_scan.imask.mask.astype(np.int64)\n\n        return dicom_img, imask, annotated_scan.dicom_file, annotated_scan.imask.contours_file\n\n    def __len__(self):\n        return len(self.scan_pkl_files)\n","sub_path":"ventricle_segmentation/dataset_loader.py","file_name":"dataset_loader.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"32569492","text":"from wumpus.direction import Direction\nfrom wumpus.game.game import GameStatus\n\n\nDIRECTION_STRING = {\n    Direction.NORTH: 'NORTH',\n    Direction.EAST: 'EAST',\n    Direction.SOUTH: 'SOUTH',\n    Direction.WEST: 'WEST',\n}\n\nDEAD_BY_PIT = (\n    'Hunter: -AAAAAAAAAaaaaaaaaaaaaaaaaaaaaa!!!!!!!!!!!!!!!\\n'\n    'Narrator: -Poor soul, he fell in the hole. It\\'s better gold in hand than a hundred in the pit\\n'\n    '\\n'\n    'You lose!\\n'\n)\nKILLED_BY_WUMPUS = (\n    'Hunter: -Heyy Wumpus, show me your moves!\\n'\n    '(CHRUP CHRUP)\\n'\n    'Wumpus: -Mmmmm... 
Human meat <3\\n'\n    'Narrator: -Poor soul, now the legend of the Hunter is inside the Wumpus\\'s stomach\\n'\n    '\\n'\n    'You lose!\\n'\n)\n\nWIN_WITHOUT_KILL_WUMPUS = (\n    'Wumpus: -Heyy Hunter, show me your moves!\\n'\n    'Narrator: Hunter takes the gold cowardly and runs to the exit (faster than Speedy Gonzalez).\\n'\n    'Wumpus: -NOOooOoOoooooo!!! :(\\n'\n    '\\n'\n    'You win!\\n'\n)\nWIN_WITH_WUMPUS_DEAD = (\n    'Narrator: Toss a coin to your Hunter\\n'\n    'Narrator: Oh, valley of plenty\\n'\n    'Narrator: Oh, valley of plenty, oh\\n'\n    'Narrator: Toss a coin to your Hunter\\n'\n    'Narrator: ... Nevermind\\n'\n    '\\n'\n    'You win!\\n'\n)\n\n\ndef render(game):\n    if game.is_player_over_bottomless_pit:\n        return DEAD_BY_PIT\n\n    if game.is_wumpus_alive and game.is_player_over_wumpus:\n        return KILLED_BY_WUMPUS\n\n    if game.status == GameStatus.WIN and game.is_wumpus_alive:\n        return WIN_WITHOUT_KILL_WUMPUS\n\n    if game.status == GameStatus.WIN and not game.is_wumpus_alive:\n        return WIN_WITH_WUMPUS_DEAD\n\n    return _render_player_status(game)\n\n\ndef _render_player_status(game):\n    position = game.player.position\n    direction = game.player.direction\n\n    gold = '1' if game.player_has_gold else '0'\n    presence_messages = _render_presence_message(game)\n\n    return (\n        '---------------------\\n'\n        f'Position: [{position.x}, {position.y}]\\n'\n        f'Direction: {DIRECTION_STRING[direction]}\\n'\n        f'Gold: {gold}\\n'\n        f'Arrows left: {game.player.arrows_left}\\n'\n        '---------------------\\n'\n        f'Presences: \\n'\n        f'{presence_messages}'\n    )\n\n\ndef _render_presence_message(game):\n    presence_messages = []\n\n    if game.is_player_at_exit:\n        presence_messages.append('You are in the exit position')\n\n    if game.is_player_over_bottomless_pit_presence:\n        presence_messages.append('A fresh breeze fills you with determination')\n\n    if game.is_player_over_wumpus_presence:\n        presence_messages.append('Wumpus stink, you are about to poke')\n\n    if game.is_player_over_wumpus:\n        presence_messages.append(\n            'You feel that Cupid is near '\n            'after watching that arrow in the Wumpus heart'\n        )\n\n    if game.is_player_in_front_of_a_wall:\n        presence_messages.append('It feels too strong to pass through... 
could it be a wall?')\n\n    if game.is_player_over_gold:\n        presence_messages.append('The gold is here!')\n\n    return _format_presences_messages(presence_messages)\n\n\ndef _format_presences_messages(presences_messages):\n    return ''.join(f'  * {msg}\\n' for msg in presences_messages)\n","sub_path":"wumpus/game/string_game_renderer.py","file_name":"string_game_renderer.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"350493776","text":"num = int(input())\ntokens = input().split(' ')  # renamed from \"str\" to avoid shadowing the built-in\nneedtime = [int(i) for i in tokens]\n#print(needtime)\nneedtime.sort()\nadder = 0\nindex = 0\nfor i in needtime:\n    if i>=adder:\n        index+=1\n    adder+=i\nprint(index)","sub_path":"Code/CodeRecords/2814/60716/239584.py","file_name":"239584.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601893061","text":"import http\nimport threading\n\nfrom flask import Flask, request\nimport requests\nfrom telegram import InputMediaPhoto, InputMediaVideo\nfrom telegram.ext import CallbackContext, Dispatcher\nimport urllib.parse\nfrom waitress import serve\n\nimport core\nfrom core.vk_event_processor import Gif, Image, extract_events\nfrom repositories.config_repository import ConfigRepository\n\ndef run_api_server(dispatcher: Dispatcher, config_repo: ConfigRepository):\n\n    app = Flask(__name__)\n    app.config[\"DEBUG\"] = True\n\n    @app.route('/chat/<chat_id>/vk-event', methods=['POST'])\n    def vk_event(chat_id: str):\n        chat_id = int(chat_id)  # negative integers in route do not get resolved for some reason\n        raw_events = request.get_json()\n\n        def proxy_image_to_chat(context: CallbackContext) -> None:\n\n            events = extract_events(raw_events)\n\n            for event in events:\n                [is_media, medias] = event.try_get_media()\n                [is_post, post] = event.try_get_post()\n\n                if (is_media):\n                    caption = f'Memas from *{event.user.name}*'\n                    media_list = []\n\n                    for i in range(len(medias)):\n                        media = medias[i]\n                        bytes = requests.get(media.url).content\n\n                        if (type(media) is Image):\n                            tg_media = InputMediaPhoto(bytes, caption=(caption if i == 0 else None), parse_mode='markdown')\n                            media_list.append(tg_media)\n                        elif (type(media) is Gif):\n                            # using video since InputMediaDocument can't be mixed with other media types,\n                            # and InputMediaAnimation is not supported by send_media_group :\\\n                            tg_media = InputMediaVideo(bytes, caption=(caption if i == 0 else None), parse_mode='markdown')\n                            media_list.append(tg_media)\n\n                    context.bot.send_media_group(chat_id, media=media_list)\n\n                if (is_post):\n                    post_attachments = post.attachments\n                    caption = f'{post.text}\\n\\n // {event.user.name}, {post.group_name}'\n                    media_list = []\n\n                    if (len(post_attachments) > 0):\n                        for i in range(len(post_attachments)):\n                            media = post_attachments[i]\n                            bytes = requests.get(media.url).content\n\n                            if (type(media) is Image):\n                                tg_media = InputMediaPhoto(bytes, caption=(caption if i == 0 else None), parse_mode='HTML')\n                                media_list.append(tg_media)\n                            elif (type(media) is Gif):\n                                # using video since InputMediaDocument can't be mixed with other media types,\n                                # and InputMediaAnimation is not supported by send_media_group :\\\n                                tg_media = InputMediaVideo(bytes, caption=(caption if i == 0 else None), parse_mode='HTML')\n                                media_list.append(tg_media)\n\n                        context.bot.send_media_group(chat_id, media=media_list)\n\n                    else:\n                        context.bot.send_message(chat_id, caption, parse_mode='HTML', disable_web_page_preview=True)\n\n        
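# Hand proxy_image_to_chat to the dispatcher's job queue (zero delay) so the slow Telegram\n        # uploads above run on the bot's worker thread and this handler can return 204 right away.\n        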
dispatcher.job_queue.run_once(proxy_image_to_chat, 0)\n\n        return ('', http.HTTPStatus.NO_CONTENT)\n\n    @app.route('/message/markdown', methods=['POST'])\n    def message_markdown():\n        message = request.get_json()\n        token = request.args.get('token')\n\n        if (not token):\n            return (\"Token was expected\", http.HTTPStatus.BAD_REQUEST)\n\n        chat_id = config_repo.get_chat_by_token(token)\n\n        if (not chat_id):\n            return (\"Token was not found\", http.HTTPStatus.BAD_REQUEST)\n\n        def send_message(context: CallbackContext) -> None:\n            context.bot.send_message(chat_id, text=message, parse_mode='markdown', disable_notification=True)\n\n        dispatcher.job_queue.run_once(send_message, 0)\n\n        return ('', http.HTTPStatus.NO_CONTENT)\n\n    @app.route('/message/html', methods=['POST'])\n    def message_html():\n        message = request.get_json()\n        token = request.args.get('token')\n\n        if (not token):\n            return (\"Token was expected\", http.HTTPStatus.BAD_REQUEST)\n\n        chat_id = config_repo.get_chat_by_token(token)\n\n        if (not chat_id):\n            return (\"Token was not found\", http.HTTPStatus.BAD_REQUEST)\n\n        def send_message(context: CallbackContext) -> None:\n            context.bot.send_message(chat_id, text=message, parse_mode='HTML', disable_notification=True)\n\n        dispatcher.job_queue.run_once(send_message, 0)\n\n        return ('', http.HTTPStatus.NO_CONTENT)\n\n    @app.route('/chat/<chat_id>/message', methods=['POST'])\n    def send_message(chat_id):\n\n        message = request.get_json()\n\n        parse_mode = request.args.get('parse_mode')\n        disable_notification = request.args.get('notification') != '1'\n\n        def send_message(context: CallbackContext) -> None:\n            try:\n                context.bot.send_message(int(chat_id), text=message, parse_mode=parse_mode, disable_notification=disable_notification)\n            except:\n                context.bot.send_message(int(chat_id), text=f'⚠️ {message}', disable_notification=disable_notification)\n                raise\n\n        dispatcher.job_queue.run_once(send_message, 0)\n\n        return ('', http.HTTPStatus.NO_CONTENT)\n\n    def run():\n        parsed = urllib.parse.urlsplit(core.env.telegram_listener)\n        serve(app, host=parsed.hostname, port=parsed.port)\n\n    thread = threading.Thread(target=run)\n    thread.start()\n","sub_path":"telegram_bot/api_server.py","file_name":"api_server.py","file_ext":"py","file_size_in_byte":5841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"413326105","text":"import argparse\nimport glob\nimport logging\nimport os\nimport random\n\nimport numpy as np\nimport torch\nfrom sklearn.model_selection import KFold\nfrom seqeval.metrics import f1_score, precision_score, recall_score, accuracy_score\nfrom torch.nn import CrossEntropyLoss\nfrom torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom tqdm import tqdm, trange\n\nfrom torch.nn.functional import softmax\n\n\nfrom transformers import (\n    MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,\n    WEIGHTS_NAME,\n    AdamW,\n    AutoConfig,\n    AutoModelForSequenceClassification,\n    AutoTokenizer,\n    get_linear_schedule_with_warmup,\n    BertForDQD,\n)\nfrom utils_sentiment import *\n\nfrom sklearn.metrics import roc_auc_score\n\n\ntry:\n    from torch.utils.tensorboard import SummaryWriter\nexcept ImportError:\n    from tensorboardX import SummaryWriter\n\n# import pudb\n# pudb.set_trace()\n\n\nlogger = logging.getLogger(__name__)\n\nMODEL_CONFIG_CLASSES = list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys())\nMODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)\n\nALL_MODELS = 
sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in MODEL_CONFIG_CLASSES), ())\n\nTOKENIZER_ARGS = [\"do_lower_case\", \"strip_accents\", \"keep_accents\", \"use_fast\"]\n\ndef set_seed(args):\n    random.seed(args.seed)\n    np.random.seed(args.seed)\n    torch.manual_seed(args.seed)\n    if args.n_gpu > 0:\n        torch.cuda.manual_seed_all(args.seed)\n\ndef labelling(args, all_target_data, model_f1, model_f2, N_init):\n\n    np.random.shuffle(all_target_data)\n    cand_data = all_target_data[:N_init]\n    all_input_ids = torch.tensor([x.input_ids for x in cand_data], dtype=torch.long)\n    all_input_mask = torch.tensor([x.input_mask for x in cand_data], dtype=torch.long)\n    all_segment_ids = torch.tensor([x.segment_ids for x in cand_data], dtype=torch.long)\n    all_label_ids = torch.tensor([x.label_ids for x in cand_data], dtype=torch.long)\n\n    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n    eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)\n    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.mini_batch_size)\n\n    if args.n_gpu > 1:\n        model_f1 = torch.nn.DataParallel(model_f1)\n        model_f2 = torch.nn.DataParallel(model_f2)\n\n    labeled_data = []\n    logger.info(\"****** Running Labelling ******\")\n\n    logger.info(\"  Num examples = %d\", len(dataset))\n    logger.info(\"  Batch size = %d\", args.mini_batch_size)\n    eval_loss = 0.0\n    nb_eval_steps = 0\n    preds = None\n    out_label_ids = None\n    model_f1.eval()\n    model_f2.eval()\n    all_logits1 = []\n    all_logits2 = []\n    choose_index = []\n    all_true_labels = []\n    all_pseudo_labels = []\n\n    for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n        batch = tuple(t.to(args.device) for t in batch)\n\n        with torch.no_grad():\n            inputs = {\n                \"input_ids\":batch[0],\n                \"attention_mask\":batch[1],\n                \"token_type_ids\":batch[2],\n                \"labels\":batch[3]\n            }\n\n            outputs1 = model_f1(**inputs)\n            outputs2 = model_f2(**inputs)\n\n            logits1 = outputs1[1]\n            logits2 = outputs2[1]\n\n            logits1 = softmax(logits1, dim=1)\n            logits2 = softmax(logits2, dim=1)\n        if len(all_logits1) == 0:\n            all_logits1 = logits1.detach().cpu().numpy()\n            all_logits2 = logits2.detach().cpu().numpy()\n            all_true_labels = batch[3].detach().cpu().numpy()\n\n        else:\n            all_logits1 = np.append(all_logits1, logits1.detach().cpu().numpy(), axis=0)\n            all_logits2 = np.append(all_logits2, logits2.detach().cpu().numpy(), axis=0)\n            all_true_labels = np.append(all_true_labels, batch[3].detach().cpu().numpy(), axis=0)\n\n    # Collect per-example confidences and hard labels from both classifiers\n    all_preds_max_1 = np.max(all_logits1, axis=-1)  # [num_examples]\n    all_preds_max_2 = np.max(all_logits2, axis=-1)  # [num_examples]\n\n    all_labels_1 = np.argmax(all_logits1, axis=1)\n    all_labels_2 = np.argmax(all_logits2, axis=1)\n\n    try:\n        assert len(dataset) == len(all_preds_max_1) == len(all_preds_max_2)\n    except AssertionError:\n        print(\"num of dataset:\", len(dataset))\n        print(\"num of all preds max 1\", len(all_preds_max_1))\n        print(\"num of all preds max 2\", len(all_preds_max_2))\n\n    labeled_data = []\n\n    all_pseudo_true_labels = []\n    for i in range(len(dataset)):\n        record = cand_data[i]\n        max_1 = all_preds_max_1[i]\n        max_2 = all_preds_max_2[i]\n\n        labels_1 = all_labels_1[i]\n        labels_2 = all_labels_2[i]\n\n        if labels_1 == labels_2 and max(max_1, max_2) >= args.alpha:\n            record.label_ids = labels_1\n            labeled_data.append(cand_data[i])\n            all_pseudo_labels.append(labels_1)\n            all_pseudo_true_labels.append(all_true_labels[i])\n\n    if len(args.output_dir) > 0:\n        if not os.path.exists(args.output_dir):\n            
os.makedirs(args.output_dir)\n # each line contains true & predict\n f1_predict_path = os.path.join(args.output_dir, \"f1_results.txt\")\n f2_predict_path = os.path.join(args.output_dir, \"f2_results.txt\")\n pseudo_predict_path = os.path.join(args.output_dir, \"pseudo_labels.txt\")\n num_labels_path = os.path.join(args.output_dir, \"num_labels.txt\")\n pseudo_acc_path = os.path.join(args.output_dir, \"pseudo_acc.txt\")\n with open(f1_predict_path, \"w+\", encoding=\"utf-8\") as f:\n assert len(all_true_labels) == len(all_labels_1)\n for t, p in zip(all_true_labels, all_labels_1):\n line = str(t) + \"\\t\" + str(p) + \"\\n\"\n f.write(line)\n with open(f2_predict_path, \"w+\", encoding=\"utf-8\") as f:\n assert len(all_true_labels) == len(all_labels_2)\n for t, p in zip(all_true_labels, all_labels_2):\n line = str(t) + \"\\t\" + str(p) + \"\\n\"\n f.write(line)\n with open(pseudo_predict_path, \"w+\", encoding=\"utf-8\") as f:\n assert len(all_pseudo_true_labels) == len(all_pseudo_labels)\n for t, p in zip(all_pseudo_true_labels, all_pseudo_labels):\n line = str(t) + '\\t' + str(p) + \"\\n\"\n f.write(line)\n \n with open(pseudo_acc_path, \"w+\", encoding=\"utf-8\") as f:\n assert len(all_pseudo_true_labels) == len(all_pseudo_labels)\n f.write( \"{}\\n\".format(accuracy_score(all_pseudo_true_labels, all_pseudo_labels)))\n \n \n logger.info(\"**** collect labeled data size %s\", len(labeled_data))\n logger.info(\"**** accuracy of pseudo labels: {}\".format(accuracy_score(all_pseudo_true_labels, all_pseudo_labels)))\n return labeled_data\n\ndef prepare_dataset(source_features, labeled_features):\n features_L = source_features + labeled_features\n\n all_input_ids = torch.tensor([f.input_ids for f in features_L], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in features_L], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in features_L], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in features_L], dtype=torch.long)\n\n dataset_L = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n\n all_input_ids = torch.tensor([f.input_ids for f in labeled_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in labeled_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in labeled_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in labeled_features], dtype=torch.long)\n\n\n dataset_TL = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n return dataset_L, dataset_TL\n\n\ndef tri_train(args, model_f1, model_f2, model_ft, source_features, target_features):\n Nt = args.N_init\n labeled_features = labelling(args, target_features, model_f1, model_f2, Nt)\n\n dataset_L, dataset_TL = prepare_dataset(source_features, labeled_features)\n k_step = args.k_step\n k_iterator = trange(k_step, desc=\"k_iter\", disable=args.local_rank not in[-1, 0])\n\n cnt = 0\n \n model_ft = train_ft(args,model_ft, dataset_TL)\n\n result = test(args, model_ft, args.tokenizer, args.labels, args.pad_token_label_id, mode=\"test\")\n result = evaluate(args, model_ft, args.tokenizer, args.labels, args.pad_token_label_id, mode=\"test\")\n \n return model_f1, model_f2, model_ft\n\ndef joint_tri_train(args, model_f1, model_f2, model_ft, source_features, target_features):\n Nt = args.N_init\n\n all_input_ids = torch.tensor([f.input_ids for f in source_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in 
source_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in source_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in source_features], dtype=torch.long)\n\n\n dataset_S = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n model_f1, model_f2 = train_f1_f2(args, model_f1, model_f2, dataset_S)\n\n labeled_features = labelling(args, target_features, model_f1, model_f2, Nt)\n dataset_L, dataset_TL = prepare_dataset(source_features, labeled_features)\n model_ft = train_ft(args,model_ft, dataset_TL)\n result = test(args, model_ft, args.tokenizer, args.labels, args.pad_token_label_id, mode=\"test\")\n result = evaluate(args, model_ft, args.tokenizer, args.labels, args.pad_token_label_id, mode=\"test\")\n return model_f1, model_f2, model_ft\n \n\ndef train_f1_f2(args, model_f1, model_f2, train_dataset):\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n \n args.train_batch_size = args.mini_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n args.num_train_epochs = 1\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n if args.warmup_ratio > 0:\n args.warmup_steps = int(t_total * args.warmup_ratio)\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in list(model_f1.named_parameters()) + list(model_f2.named_parameters()) if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in list(model_f1.named_parameters()) + list(model_f2.named_parameters()) if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ] \n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n [model_f1, model_f2], optimizer = amp.initialize([model_f1, model_f2], optimizer, opt_level=args.fp16_opt_level)\n \n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model_f1 = torch.nn.DataParallel(model_f1)\n model_f2 = torch.nn.DataParallel(model_f2)\n\n \n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model_f1 = torch.nn.parallel.DistributedDataParallel(\n model_f1, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n model_f2 = torch.nn.parallel.DistributedDataParallel(\n model_f2, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n \n \n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n \n tr_loss, logging_loss = 0.0, 0.0\n model_f1.zero_grad()\n model_f2.zero_grad()\n\n\n set_seed(args)\n logger.info(\"***** train f1 f2 ******\")\n logger.info(\"***** Num examples: {} ********\".format(len(train_dataset)))\n\n for _ in range(1):\n epoch_iterator = tqdm(train_dataloader, desc=\"Iter(loss=X.XXX, lr=X.XXXXXXXX)\", disable=args.local_rank not in [-1, 0])\n\n for step, batch in enumerate(epoch_iterator):\n if 
steps_trained_in_current_epoch > 0:\n                steps_trained_in_current_epoch -= 1\n                continue\n\n            model_f1.train()\n            model_f2.train()\n            batch = tuple(t.to(args.device) for t in batch)\n\n            inputs = {\n                \"input_ids\":batch[0],\n                \"attention_mask\":batch[1],\n                \"token_type_ids\":batch[2],\n                \"labels\":batch[3]\n            }\n\n            outputs1 = model_f1(**inputs)\n            loss1 = outputs1[0]\n\n            outputs2 = model_f2(**inputs)\n            loss2 = outputs2[0]\n\n            w1 = model_f1.classifier.weight  # [num_labels, hidden_size]\n            w2 = model_f2.classifier.weight.transpose(-1, -2)  # [hidden_size, num_labels]\n\n            # ||W1 . W2^T|| penalty: keeps the two classifier heads decorrelated so they stay diverse\n            norm_term = torch.norm(torch.matmul(w1, w2))\n\n            loss = loss1 + loss2 + args.alpha * norm_term\n\n            if args.n_gpu > 1:\n                loss = loss.mean()\n            if args.gradient_accumulation_steps > 1:\n                loss = loss / args.gradient_accumulation_steps\n\n            if args.fp16:\n                with amp.scale_loss(loss, optimizer) as scaled_loss:\n                    scaled_loss.backward()\n            else:\n                loss.backward()\n\n            tr_loss += loss.item()\n            if (step + 1) % args.gradient_accumulation_steps == 0:\n                epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))\n                if args.fp16:\n                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n                else:\n                    torch.nn.utils.clip_grad_norm_(model_f1.parameters(), args.max_grad_norm)\n                    torch.nn.utils.clip_grad_norm_(model_f2.parameters(), args.max_grad_norm)\n\n                optimizer.step()\n                scheduler.step()  # Update learning rate schedule\n                model_f1.zero_grad()\n                model_f2.zero_grad()\n                global_step += 1\n\n                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n                    # Log metrics\n                    tb_writer.add_scalar(\"f1_f2_lr\", scheduler.get_lr()[0], global_step)\n                    tb_writer.add_scalar(\"f1_f2_loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n                    logging_loss = tr_loss\n\n    if args.local_rank in [-1, 0]:\n        tb_writer.close()\n\n    return model_f1, model_f2\n\ndef train_ft(args, model_ft, train_dataset):\n    if args.local_rank in [-1, 0]:\n        tb_writer = SummaryWriter()\n\n    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n\n    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n    args.num_train_epochs = 1\n    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n    if args.warmup_ratio > 0:\n        args.warmup_steps = int(t_total * args.warmup_ratio)\n\n    no_decay = [\"bias\", \"LayerNorm.weight\"]\n    optimizer_grouped_parameters = [\n        {\n            \"params\": [p for n, p in list(model_ft.named_parameters()) if not any(nd in n for nd in no_decay)],\n            \"weight_decay\": args.weight_decay,\n        },\n        {\"params\": [p for n, p in list(model_ft.named_parameters()) if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n    ]\n\n    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n    scheduler = get_linear_schedule_with_warmup(\n        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n    )\n\n    if args.fp16:\n        try:\n            from apex import amp\n        except ImportError:\n            raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n        model_ft, optimizer = amp.initialize(model_ft, optimizer, opt_level=args.fp16_opt_level)\n\n    # multi-gpu training (should be after apex fp16 initialization)\n    if args.n_gpu > 1:\n        model_ft = torch.nn.DataParallel(model_ft)\n\n    # Distributed 
training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model_ft = torch.nn.parallel.DistributedDataParallel(\n model_ft, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n \n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n tr_loss, logging_loss = 0.0, 0.0\n\n model_ft.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n\n set_seed(args)\n logger.info(\"******* train ft *************\")\n for _ in range(1):\n epoch_iterator = tqdm(train_dataloader, desc=\"Iter(loss=X.XXX, lr=X.XXXXXXXX)\", disable=args.local_rank not in [-1, 0])\n\n for step, batch in enumerate(epoch_iterator):\n model_ft.train()\n batch = tuple(t.to(args.device) for t in batch)\n\n inputs = {\n \"input_ids\":batch[0],\n \"attention_mask\":batch[1],\n \"token_type_ids\":batch[2],\n \"labels\":batch[3],\n }\n\n outputs = model_ft(**inputs)\n loss = outputs[0]\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu \n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n \n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model_ft.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model_ft.zero_grad()\n global_step += 1\n \n \n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return model_ft\n\ndef train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):\n \"\"\" Train the model \"\"\"\n if args.local_rank in [-1, 0]:\n tb_writer = SummaryWriter()\n\n args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)\n train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)\n\n if args.max_steps > 0:\n t_total = args.max_steps\n args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1\n else:\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs\n\n if args.warmup_ratio > 0:\n args.warmup_steps = int(t_total * args.warmup_ratio)\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": args.weight_decay,\n },\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(\n optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total\n )\n\n # Check if saved optimizer or scheduler states exist\n if os.path.isfile(os.path.join(args.model_name_or_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(args.model_name_or_path, \"scheduler.pt\")\n ):\n # 
Load in optimizer and scheduler states\n optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"optimizer.pt\")))\n scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, \"scheduler.pt\")))\n\n if args.fp16:\n try:\n from apex import amp\n except ImportError:\n raise ImportError(\"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.\")\n model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)\n\n # multi-gpu training (should be after apex fp16 initialization)\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True\n )\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataset))\n logger.info(\" Num Epochs = %d\", args.num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n args.train_batch_size\n * args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n # Check if continuing training from a checkpoint\n if os.path.exists(args.model_name_or_path):\n # set global_step to gobal_step of last saved checkpoint from model path\n try:\n global_step = int(args.model_name_or_path.split(\"-\")[-1].split(\"/\")[0])\n except ValueError:\n global_step = 0\n epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)\n steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(\" Continuing training from epoch %d\", epochs_trained)\n logger.info(\" Continuing training from global step %d\", global_step)\n logger.info(\" Will skip the first %d steps in the first epoch\", steps_trained_in_current_epoch)\n\n tr_loss, logging_loss = 0.0, 0.0\n model.zero_grad()\n train_iterator = trange(\n epochs_trained, int(args.num_train_epochs), desc=\"Epoch\", disable=args.local_rank not in [-1, 0]\n )\n set_seed(args) # Added here for reproductibility\n for _ in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iter(loss=X.XXX, lr=X.XXXXXXXX)\", disable=args.local_rank not in [-1, 0])\n for step, batch in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {\"input_ids\": batch[0], \"attention_mask\": batch[1], \"labels\": batch[3]}\n if args.model_type != \"distilbert\":\n inputs[\"token_type_ids\"] = (\n batch[2] if args.model_type in [\"bert\", \"xlnet\"] else None\n ) # XLM and RoBERTa don\"t use segment_ids\n\n outputs = model(**inputs)\n loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)\n\n if args.n_gpu > 1:\n loss = loss.mean() # mean() to average on 
multi-gpu parallel training\n if args.gradient_accumulation_steps > 1:\n loss = loss / args.gradient_accumulation_steps\n\n if args.fp16:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n tr_loss += loss.item()\n if (step + 1) % args.gradient_accumulation_steps == 0:\n epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))\n if args.fp16:\n torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)\n else:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n\n optimizer.step()\n scheduler.step() # Update learning rate schedule\n model.zero_grad()\n global_step += 1\n\n if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:\n # Log metrics\n if (\n args.local_rank == -1 and args.evaluate_during_training\n ): # Only evaluate when single GPU otherwise metrics may not average well\n results, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\")\n for key, value in results.items():\n tb_writer.add_scalar(\"eval_{}\".format(key), value, global_step)\n tb_writer.add_scalar(\"lr\", scheduler.get_lr()[0], global_step)\n tb_writer.add_scalar(\"loss\", (tr_loss - logging_loss) / args.logging_steps, global_step)\n logging_loss = tr_loss\n\n if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:\n # Save model checkpoint\n output_dir = os.path.join(args.output_dir, \"checkpoint-{}\".format(global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n model_to_save = (\n model.module if hasattr(model, \"module\") else model\n ) # Take care of distributed/parallel training\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n torch.save(args, os.path.join(output_dir, \"training_args.bin\"))\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n torch.save(optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n logger.info(\"Saving optimizer and scheduler states to %s\", output_dir)\n\n if args.max_steps > 0 and global_step > args.max_steps:\n epoch_iterator.close()\n break\n if args.max_steps > 0 and global_step > args.max_steps:\n train_iterator.close()\n break\n\n if args.local_rank in [-1, 0]:\n tb_writer.close()\n\n return global_step, tr_loss / global_step\n \n\ndef evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=\"\"):\n eval_examples = args.processor.get_dev_examples(args.data_dir, args.source_task)\n \n eval_features = convert_examples_to_features(\n eval_examples, labels, args.max_seq_length, tokenizer\n )\n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in eval_features], dtype=torch.long)\n\n eval_dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, 
batch_size=args.eval_batch_size)\n label_map = {v:i for i,v in enumerate(labels)}\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation %s *****\", prefix)\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps, nb_eval_examples = 0, 0\n preds = None\n eval_TP, eval_FP, eval_FN = 0, 0, 0\n eval_accuracy = 0\n model.eval()\n all_labels = []\n all_preds = []\n all_false_prob = []\n softmax = torch.nn.Softmax(dim=-1)\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\":batch[1],\n \"token_type_ids\":batch[2],\n \"labels\": batch[3],\n }\n\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if args.n_gpu > 1:\n tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating\n\n eval_loss += tmp_eval_loss.item()\n nb_eval_steps += 1\n \n \n label_ids = batch[3].detach().cpu().numpy()\n if len(all_labels) == 0:\n all_labels = label_ids\n else:\n all_labels = np.append(all_labels, label_ids)\n \n logits = torch.nn.functional.log_softmax(logits, dim=-1)\n logits = logits.detach().cpu().numpy()\n \n preds = np.argmax(logits, axis= -1)\n\n if len(all_preds) == 0:\n all_preds = preds\n else:\n all_preds = np.append(all_preds, preds)\n \n\n\n eval_loss = eval_loss / nb_eval_steps\n\n results = {\n \"task\": args.source_task,\n \"loss\": eval_loss,\n \"eval_accuracy\": accuracy_score(all_labels, all_preds),\n \n }\n\n logger.info(\"***** Eval results %s *****\", prefix)\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n\n return results\n\ndef test(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=\"\"):\n eval_examples = args.processor.get_dev_examples(args.data_dir, args.target_task)\n \n eval_features = convert_examples_to_features(\n eval_examples, labels, args.max_seq_length, tokenizer\n )\n \n all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n all_label_ids = torch.tensor([f.label_ids for f in eval_features], dtype=torch.long)\n \n eval_dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\n args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)\n # Note that DistributedSampler samples randomly\n eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)\n eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)\n label_map = {v:i for i,v in enumerate(labels)}\n\n # multi-gpu evaluate\n if args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Eval!\n logger.info(\"***** Running evaluation %s *****\", prefix)\n logger.info(\" Num examples = %d\", len(eval_dataset))\n logger.info(\" Batch size = %d\", args.eval_batch_size)\n eval_loss = 0.0\n nb_eval_steps, nb_eval_examples = 0, 0\n preds = None\n eval_TP, eval_FP, eval_FN = 0, 0, 0\n eval_accuracy = 0\n model.eval()\n softmax = torch.nn.Softmax(dim=-1)\n \n all_labels = []\n all_preds = []\n all_false_prob = []\n for batch in tqdm(eval_dataloader, desc=\"Evaluating\"):\n 
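# Accumulate gold labels and argmax predictions batch by batch; accuracy is computed once after the loop.\n        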
batch = tuple(t.to(args.device) for t in batch)\n\n with torch.no_grad():\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\":batch[1],\n \"token_type_ids\":batch[2],\n \"labels\": batch[3],\n }\n\n outputs = model(**inputs)\n tmp_eval_loss, logits = outputs[:2]\n\n if args.n_gpu > 1:\n tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating\n\n eval_loss += tmp_eval_loss.item()\n nb_eval_steps += 1\n \n \n label_ids = batch[3].detach().cpu().numpy()\n if len(all_labels) == 0:\n all_labels = label_ids\n\n else:\n all_labels = np.append(all_labels, label_ids)\n\n logits = torch.nn.functional.log_softmax(logits, dim=-1)\n logits = logits.detach().cpu().numpy()\n \n preds = np.argmax(logits, axis= -1)\n\n if len(all_preds) == 0:\n all_preds = preds\n else:\n all_preds = np.append(all_preds, preds)\n\n eval_loss = eval_loss / nb_eval_steps\n\n results = {\n \"task\": args.target_task,\n \"loss\": eval_loss,\n \"eval_accuracy\": accuracy_score(all_labels, all_preds),\n }\n\n logger.info(\"***** Eval results %s *****\", prefix)\n for key in sorted(results.keys()):\n logger.info(\" %s = %s\", key, str(results[key]))\n\n if len(args.result_path) > 0:\n txt_results = {}\n import json\n with open(args.result_path, \"w+\", encoding=\"utf-8\") as f:\n txt_results[\"source_task\"] = args.source_task\n txt_results[\"target_task\"] = results[\"task\"]\n txt_results[\"acc\"] = results[\"eval_accuracy\"]\n json.dump(txt_results, f)\n\n return results\n\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\n \"--data_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input data dir. Should contain the training files for the CoNLL-2003 NER task.\",\n )\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" + \", \".join(MODEL_TYPES),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \" + \", \".join(ALL_MODELS),\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n parser.add_argument(\n \"--model_f1_path\",\n default=None,\n type=str,\n required=True,\n )\n parser.add_argument(\n \"--model_f2_path\",\n default=None,\n type=str,\n required=True\n )\n parser.add_argument(\n \"--model_ft_path\",\n default=None,\n type=str,\n required=True\n )\n\n # Other parameters\n parser.add_argument(\n \"--labels\",\n default=\"\",\n type=str,\n help=\"Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.\",\n )\n parser.add_argument(\n \"--config_name\", default=\"\", type=str, help=\"Pretrained config name or path if not the same as model_name\"\n )\n parser.add_argument(\n \"--tokenizer_name\",\n default=\"\",\n type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\",\n )\n parser.add_argument(\n \"--cache_dir\",\n default=\"\",\n type=str,\n help=\"Where do you want to store the pre-trained models downloaded from s3\",\n )\n parser.add_argument(\n \"--max_seq_length\",\n default=128,\n type=int,\n help=\"The maximum total input sequence length after tokenization. 
Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\",\n )\n parser.add_argument(\"--do_train\", action=\"store_true\", help=\"Whether to run training.\")\n parser.add_argument(\"--do_eval\", action=\"store_true\", help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_predict\", action=\"store_true\", help=\"Whether to run predictions on the test set.\")\n parser.add_argument(\n \"--evaluate_during_training\",\n action=\"store_true\",\n help=\"Whether to run evaluation during training at each logging step.\",\n )\n parser.add_argument(\n \"--do_lower_case\", action=\"store_true\", help=\"Set this flag if you are using an uncased model.\"\n )\n parser.add_argument(\n \"--keep_accents\", action=\"store_const\", const=True, help=\"Set this flag if model is trained with accents.\"\n )\n parser.add_argument(\n \"--strip_accents\", action=\"store_const\", const=True, help=\"Set this flag if model is trained without accents.\"\n )\n parser.add_argument(\"--use_fast\", action=\"store_const\", const=True, help=\"Set this flag to use fast tokenization.\")\n parser.add_argument(\"--per_gpu_train_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\n \"--per_gpu_eval_batch_size\", default=8, type=int, help=\"Batch size per GPU/CPU for evaluation.\"\n )\n parser.add_argument(\n \"--gradient_accumulation_steps\",\n type=int,\n default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\",\n )\n parser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float, help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\", default=3.0, type=float, help=\"Total number of training epochs to perform.\"\n )\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\", default=0, type=int, help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\", type=int, default=500, help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\", type=int, default=500, help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--eval_all_checkpoints\",\n action=\"store_true\",\n help=\"Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number\",\n )\n parser.add_argument(\"--no_cuda\", action=\"store_true\", help=\"Avoid using CUDA when available\")\n parser.add_argument(\n \"--overwrite_output_dir\", action=\"store_true\", help=\"Overwrite the content of the output directory\"\n )\n parser.add_argument(\n \"--overwrite_cache\", action=\"store_true\", help=\"Overwrite the cached training and evaluation sets\"\n )\n parser.add_argument(\"--seed\", type=int, default=42, help=\"random seed for initialization\")\n\n parser.add_argument(\"--warmup_ratio\", type=float, default=0.1)\n\n parser.add_argument(\"--result_path\", type=str, default=\"\")\n parser.add_argument(\n \"--fp16\",\n action=\"store_true\",\n help=\"Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit\",\n )\n parser.add_argument(\n \"--fp16_opt_level\",\n type=str,\n default=\"O1\",\n help=\"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].\"\n \"See details at https://nvidia.github.io/apex/amp.html\",\n )\n parser.add_argument(\n \"--supervised_training\",\n action=\"store_true\",\n )\n parser.add_argument(\n \"--finetuned_bert\",\n action=\"store_true\"\n )\n parser.add_argument(\"--local_rank\", type=int, default=-1, help=\"For distributed training: local_rank\")\n parser.add_argument(\"--server_ip\", type=str, default=\"\", help=\"For distant debugging.\")\n parser.add_argument(\"--server_port\", type=str, default=\"\", help=\"For distant debugging.\")\n\n parser.add_argument(\"--source_task\", type=str, default=\"\")\n parser.add_argument(\"--target_task\", type=str, default=\"\")\n\n parser.add_argument(\"--k_step\", type=int, default=1)\n parser.add_argument(\"--iter\", type=int, default=1)\n parser.add_argument(\"--alpha\", type=float, default=0.5)\n parser.add_argument(\"--N_init\", type=int, default=100)\n parser.add_argument(\"--mini_batch_size\", type=int, default=32)\n parser.add_argument(\"--result_dir\", type=str, default=\"\")\n parser.add_argument(\"--joint_loss\", action=\"store_true\")\n\n args = parser.parse_args()\n\n if (\n os.path.exists(args.output_dir)\n and os.listdir(args.output_dir)\n and args.do_train\n and not args.overwrite_output_dir\n ):\n raise ValueError(\n \"Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.\".format(\n args.output_dir\n )\n )\n \n # Setup distant debugging if needed\n if args.server_ip and args.server_port:\n # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script\n import ptvsd\n\n print(\"Waiting for debugger attach\")\n ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)\n ptvsd.wait_for_attach()\n\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()\n else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend=\"nccl\")\n args.n_gpu = 1\n args.device = device\n\n # Setup logging\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,\n )\n logger.warning(\n \"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s\",\n args.local_rank,\n device,\n args.n_gpu,\n bool(args.local_rank != -1),\n args.fp16,\n )\n\n # Set seed\n set_seed(args)\n processor = DataProcessor()\n args.processor = processor\n labels = processor.get_labels(args.data_dir)\n num_labels = len(labels)\n\n pad_token_label_id = CrossEntropyLoss().ignore_index\n args.pad_token_label_id = pad_token_label_id\n if args.local_rank not in [-1, 0]:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n\n args.model_type = args.model_type.lower()\n config = AutoConfig.from_pretrained(\n args.config_name if args.config_name else args.model_name_or_path,\n num_labels=num_labels,\n id2label={str(i): label for i, label in enumerate(labels)},\n label2id={label: i for i, label in enumerate(labels)},\n cache_dir=args.cache_dir if args.cache_dir else None,\n )\n tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}\n logger.info(\"Tokenizer arguments: %s\", tokenizer_args)\n tokenizer = AutoTokenizer.from_pretrained(\n args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,\n cache_dir=args.cache_dir if args.cache_dir else None,\n **tokenizer_args,\n )\n\n \n args.tokenizer = tokenizer\n args.labels = labels\n if args.local_rank == 0:\n torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab\n \n logger.info(\"Training/evaluation parameters %s\", args)\n\n if args.do_train:\n\n source_examples = processor.get_train_examples(args.data_dir, args.source_task)\n target_examples = processor.get_train_examples(args.data_dir, args.target_task)\n\n source_features = convert_examples_to_features(source_examples, labels, args.max_seq_length, tokenizer)\n target_features = convert_examples_to_features(target_examples, labels, args.max_seq_length, tokenizer)\n\n args.source_features = source_features\n args.target_features = target_features\n\n logger.info(\"**** source examples: {} *******\".format(len(source_features)))\n logger.info(\"**** target examples: {} *****\".format(len(target_features)))\n\n model_f1 = AutoModelForSequenceClassification.from_pretrained(\n args.model_f1_path,\n from_tf=bool(\".ckpt\" 
in args.model_f1_path),\n            config=config,\n            cache_dir=args.cache_dir if args.cache_dir else None,\n        )\n\n        model_f2 = AutoModelForSequenceClassification.from_pretrained(\n            args.model_f2_path,\n            from_tf=bool(\".ckpt\" in args.model_f2_path),\n            config=config,\n            cache_dir=args.cache_dir if args.cache_dir else None,\n        )\n\n        model_ft = AutoModelForSequenceClassification.from_pretrained(\n            args.model_ft_path,\n            from_tf=bool(\".ckpt\" in args.model_ft_path),\n            config=config,\n            cache_dir=args.cache_dir if args.cache_dir else None,\n        )\n\n        model_f1.to(args.device)\n        model_f2.to(args.device)\n        model_ft.to(args.device)\n        if args.joint_loss:\n            tri_train_func = joint_tri_train\n        else:\n            tri_train_func = tri_train\n        model_f1, model_f2, model_ft = tri_train_func(args, model_f1, model_f2, model_ft, source_features, target_features)\n        model = model_ft\n\n    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):\n        # Create output directory if needed\n        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:\n            os.makedirs(args.output_dir)\n\n        logger.info(\"Saving model checkpoint to %s\", args.output_dir)\n        # Save a trained model, configuration and tokenizer using `save_pretrained()`.\n        # They can then be reloaded using `from_pretrained()`\n        model_to_save = (\n            model.module if hasattr(model, \"module\") else model\n        )  # Take care of distributed/parallel training\n        model_to_save.save_pretrained(args.output_dir)\n        tokenizer.save_pretrained(args.output_dir)\n\n        # Good practice: save your training arguments together with the trained model\n        torch.save(args, os.path.join(args.output_dir, \"training_args.bin\"))\n\n    # Evaluation\n    results = {}\n    if args.do_eval and args.local_rank in [-1, 0]:\n        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)\n        checkpoints = [args.output_dir]\n        if args.eval_all_checkpoints:\n            checkpoints = list(\n                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + \"/**/\" + WEIGHTS_NAME, recursive=True))\n            )\n            logging.getLogger(\"pytorch_transformers.modeling_utils\").setLevel(logging.WARN)  # Reduce logging\n        logger.info(\"Evaluate the following checkpoints: %s\", checkpoints)\n        for checkpoint in checkpoints:\n            global_step = checkpoint.split(\"-\")[-1] if len(checkpoints) > 1 else \"\"\n            model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n            model.to(args.device)\n            result = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode=\"dev\", prefix=global_step)\n            if global_step:\n                result = {\"{}_{}\".format(global_step, k): v for k, v in result.items()}\n            results.update(result)\n        output_eval_file = os.path.join(args.output_dir, \"eval_results.txt\")\n        with open(output_eval_file, \"w\") as writer:\n            for key in sorted(results.keys()):\n                writer.write(\"{} = {}\\n\".format(key, str(results[key])))\n\n    if args.do_predict and args.local_rank in [-1, 0]:\n        tokenizer = AutoTokenizer.from_pretrained(args.output_dir, **tokenizer_args)\n        model = AutoModelForSequenceClassification.from_pretrained(args.output_dir)\n        model.to(args.device)\n        result = test(args, model, tokenizer, labels, pad_token_label_id, mode=\"test\")\n        # Save results\n        output_test_results_file = os.path.join(args.output_dir, \"test_results.txt\")\n        with open(output_test_results_file, \"w\") as writer:\n            for key in sorted(result.keys()):\n                writer.write(\"{} = {}\\n\".format(key, str(result[key])))\n\n    return results\n\nif __name__ == \"__main__\":\n    
main()","sub_path":"examples/sentiment/run_sentiment_tri.py","file_name":"run_sentiment_tri.py","file_ext":"py","file_size_in_byte":50328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"78141345","text":"class D:\n def __get__(*args):\n print('get')\n def __set__(*args):\n raise AttributeError('Cannot set')\n\nclass C:\n a = D()\n\nX = C()\nX.a\n\nC.a\n\nX.a = 99\nprint(X.a)\n\n#print(list(X.__dict__.keys()))\n\n","sub_path":"python/readonly.py","file_name":"readonly.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"75768553","text":"\"\"\"\nCreated On: 20th Sept 2015\n@author: Amitayush Thakur,Jaiwant Rawat,Ashish Tilokani\n\"\"\"\n\nfrom DatabaseParser.models import WordTable, DocFreqTable, DocClass\nimport os\n\n# Create your views here.\nPATH_TO_DATASET = '../TrainingData/'\nMAX_NUM_OF_WORDS_READ = 300\n\ndef onlyascii(char):\n if ord(char) < 48 or ord(char) > 127: return ''\n else: return char\n\ndef get_my_string(file_path):\n s = \"\"\n l = len(file_path)\n i = 0\n while i < l:\n s += onlyascii(file_path[i])\n i += 1\n s = s.lower()\n return s\n\ndef parseTextToDb(fileList):\n #logic for counting and reading the text file\n for file in fileList:\n fileToken = file.split('/')\n if len(DocClass.objects.filter(className = fileToken[1],docName=fileToken[2])) != 0:\n continue\n DocClass.objects.create(className = fileToken[1],docName=fileToken[2])\n fp = open(file,encoding=\"latin-1\")\n wordList = fp.read().split()\n cnt = 0\n print('Inserting words for file '+file)\n for word in wordList:\n word = get_my_string(word)\n #print(word)\n if len(WordTable.objects.filter(word=word,docName = fileToken[2]))== 0 :\n WordTable.objects.create(word = word,docName = fileToken[2],freq = 1)\n if len(DocFreqTable.objects.filter(word=word))==0:\n DocFreqTable.objects.create(word=word,docFreq = 1)\n else:\n docIns = DocFreqTable.objects.filter(word=word)[0]\n docIns.docFreq += 1\n docIns.save()\n else:\n wordIns = WordTable.objects.filter(word=word,docName = fileToken[2])[0]\n wordIns.freq += 1\n wordIns.save()\n cnt += 1\n if cnt == MAX_NUM_OF_WORDS_READ:\n break\n print('Operation Completed for file '+file+' Moving ahead.... 
\\n\\n\\n')\n \ndef __main__():\n dirList = [x for x in os.listdir(PATH_TO_DATASET)]\n files = []\n for dirName in dirList:\n for x in os.listdir(PATH_TO_DATASET+dirName):\n if str(x)[-3:]== 'txt':\n pathName = PATH_TO_DATASET+dirName+'/'+str(x)\n files.append(pathName)\n parseTextToDb(files)\n\n#__main__()\n","sub_path":"DatabaseParser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"293605645","text":"import sqlite3\nimport datetime\nimport urllib.request\n\ndef get_subscriptions():\t# queries database for subscriptions and returns data as a hash\n\tconn = sqlite3.connect('subscriptions.db')\n\tc = conn.cursor()\n\n\tc.execute('SELECT * FROM podcasts')\n\n\tdata = c.fetchall()\n\n\tconn.close()\n\n\treturn data\n\ndef download_episodes(data):\n\timport feedparser\n\tconn = sqlite3.connect('subscriptions.db')\n\tfor podcast in data:\n\t\tfeed = feedparser.parse(podcast[1])\n\t\titems = feed['items']\n\n\t\t# this part isnt all right\n\t\tprint('Downloading ' + items[0].title + '...')\n\n\t\t# logging data\n\t\t# log('TITLE -> ' + items[0].title)\n\t\t# log('LINK -> ' + items[0].link)\n\n\t\twith urllib.request.urlopen(items[0].link) as response, open('podcasts\\\\' + podcast[0] + '.mp3', 'wb') as out_file:\n\t\t\tdata = response.read()\n\t\t\tout_file.write(data)\n\t\t\tout_file.close()\n\t\t# end with\n\n\t\tc = conn.cursor()\n\t\tc.execute(\"UPDATE podcasts set last_downloaded_dt=\\'\" + items[0].updated + \"\\' where name=\\'\" + podcast[0] + \"\\'\")\n\t\tprint('Download finished!')\n\t# end for\n\n\tconn.commit()\n\tconn.close()\n\n\tprint('All downloads completed!')\n\ndef log(text):\n\n\toutput = open('log.txt', 'a')\n\toutput.write(str(datetime.datetime.now()) + ' >>> ' + text)\n\toutput.write('\\n')\n\toutput.close()\n\nstuff = get_subscriptions()\ndownload_episodes(stuff)\n","sub_path":"fetch_podcasts.py","file_name":"fetch_podcasts.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"328995444","text":"#!/usr/bin/python\n# Solved by Bogdan Trif @ Completed on Thu, 17 Nov 2016, 17:37\n#The Euler Project https://projecteuler.net\n'''\nXOR decryption - Problem 59\nEach character on a computer is assigned a unique code and the preferred standard is ASCII (American Standard Code for Information Interchange).\nFor example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.\n\nA modern encryption method is to take a text file, convert the bytes to ASCII, then XOR each byte with a given value, taken from a secret key.\nThe advantage with the XOR function is that using the same encryption key on the cipher text, restores the plain text;\nfor example, 65 XOR 42 = 107, then 107 XOR 42 = 65.\n\nFor unbreakable encryption, the key is the same length as the plain text message, and the key is made up of random bytes.\nThe user would keep the encrypted message and the encryption key in different locations, and without both \"halves\",\nit is impossible to decrypt the message.\n\nUnfortunately, this method is impractical for most users, so the modified method is to use a password as a key.\nIf the password is shorter than the message, which is likely, the key is repeated cyclically throughout the message.\nThe balance for this method is using a sufficiently long password key for security, but short enough to be memorable.\n\nYour task has been made easy, as the 
encryption key consists of three lower case characters.\nUsing cipher.txt (right click and 'Save Link/Target As...'), a file containing the encrypted ASCII codes, and the knowledge\nthat the plain text must contain common English words, decrypt the message and find the sum of the ASCII values in the original text.\n'''\nimport time\n\nprint('---------------TESTS ----------------------')\n\nASCII={}\nfor i in range(32,128): ASCII[chr(i)] = i\nprint(ASCII)\n\nfor k, v in ASCII.items(): print(k, v, end=' ')\n\nprint('\\n',ASCII)\nprint('The \\ key corresponds to the value : ',ASCII['\\\\'])\nprint('The ~ key has the value of : ',ASCII.get('~'))\nprint('The index 78 corresponds to : ',list(ASCII.keys())[list(ASCII.values()).index(78)])\n\nprint('\\nReverse the Dictionary , keys become values :')\nascii = dict(zip(ASCII.values(),ASCII.keys()))\nprint(ascii)\nprint(ascii[43])\n\nprint('-----'*15,'\\n')\n\nfilename = 'pb059_cipher.txt'\nf = open(filename , 'r')\ntext = f.read()\nf.close()\nASCII_ciphered = []\n\nfor row in text.split(','):\n ASCII_ciphered.append(int(row))\n\nprint('The original ciphered message text containing ASCII Numbers representation which are encrypted:\\n',ASCII_ciphered)\nfor i in ASCII_ciphered: print(i,end=' ')\n\n\n\n# TESTS : 65 XOR 42 = 107, then 107 XOR 42 = 65.\nprint('\\n\\nXOR Test 1 : ', 65^42 )\nprint('XOR Test 2 : ', 107^42 )\n\n#############################################\n\nprint('-----'*15,'\\n')\n\n# Stupid XOR demo\nprint('----- Stupid XOR demo --------')\nfrom itertools import cycle\n\nmessage = 'attack at dawn'\nkey = 's3cr3t'\n\nprint('The message encoded in ASCII : ', [ ord(i) for i in message ] )\nprint('Converted back from ASCII : ', ''.join( chr(i) for i in [ ord(i) for i in message ] ) ,'\\n' )\n\ncyphered = ''.join(chr(ord(c)^ord(k)) for c,k in zip(message, cycle(key)))\n\nprint('The encrypted message encoded in ASCII : ',[ord(i) for i in cyphered ],'\\n' )\n\nprint(' Encryption of the message: %s ^ %s = %s' % (message, key, cyphered))\nmessage = ''.join(chr(ord(c)^ord(k)) for c,k in zip(cyphered, cycle(key)))\nprint(' Decryption of the message: %s ^ %s = %s' % (cyphered, key, message))\n\nprint('\\n--------------------- A RANDOM KEY TEST --------------------')\n\nkey = 'asl'\ntest_msg = ''.join( chr(i) for i in ASCII_ciphered )\nprint(' Conversion into letters of the encrypted text : ', type(test_msg),'\\n' , test_msg[0:100])\ntest_decoded = ''.join( chr(ord(c)^ord(k)) for c, k in zip(test_msg, cycle(key) ))\nprint('Use XOR decryption to test a random key : \\n', test_decoded)\n\nprint('\\n--------------------------- END TEST ---------------------')\n\nprint('\\n================ My FIRST SOLUTION, ===============\\n')\nt1 = time.time()\n\n\n\nstring_msg = ''.join( chr(i) for i in ASCII_ciphered )\n\ndef decode_encrypted_XOR_string(encrypted_string ):\n for i in range(ord('a'), ord('z')+1):\n for j in range(ord('a'), ord('z')+1):\n for k in range(ord('a'), ord('z')+1):\n key = chr(i)+chr(j)+chr(k)\n # print(key, ': ' ,end=' ')\n decoded_message = ''.join( chr( ord(c)^ord(k) ) for c, k in zip(encrypted_string , cycle(key)))\n if decoded_message.find('there') != -1 :\n print('Encryption key : ',key , '\\nThe Text :\\n\\n' , decoded_message)\n return decoded_message\n\ndecrypted_message = decode_encrypted_XOR_string(string_msg)\nprint('\\n',decrypted_message.replace('.', '.\\n'))\n# print('Loius is back home '.find('ist'))\n\ndecoded_ASCII = [ord(i) for i in decrypted_message]\nprint('decoded_ASCII : \\n', decoded_ASCII )\nprint('\\nFinal Answer : 
', sum(decoded_ASCII))\n\nprint('\\n\\nchr again :) : \\n', [chr(i) for i in decoded_ASCII] )\n\n\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\n\nprint('\\n===============OTHER SOLUTIONS FROM THE EULER FORUM ==============')\nprint('\\n--------------------------SOLUTION 1, che_sac, USA --------------------------')\nt1 = time.time()\n# Brute Force. Instantaneous.\n\ncipher_text = ASCII_ciphered\n\n#print(cipher_text)\nsmall_alphas = [chr(i) for i in range(97,123)]\nfor first in small_alphas:\n\tfor second in small_alphas:\n\t\tfor third in small_alphas:\n\t\t\tdecipher_text = []\n\t\t\tactual_text = []\n\t\t\tencryption_key = [first,second,third]\n\t\t\tencryption_key = encryption_key * 400 + [encryption_key[0]]\n\t\t\tfor index in range(1201):\n\t\t\t\tc = cipher_text[index]\n\t\t\t\te = ord(encryption_key[index])\n\t\t\t\tc ^= e\n\n\t\t\t\tif c in range(65,91) or c in range(97, 123) or \\\n\t\t\t\t\t\tc in range(32, 60):\n\t\t\t\t\tactual_text.append(chr(c))\n\t\t\t\t\tdecipher_text.append(c)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint('Sum of ASCII values in original text = ',sum(decipher_text))\n\t\t\t\tprint('Encryption key = ',first + second + third)\n\t\t\t\tprint(''.join(actual_text))\n\t\t\t\tbreak\n\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n') # Completed in : 732.041836 ms\n\nprint('\\n--------------------------SOLUTION 2, VERY NICE FUNCTIONS and OPEN URL, dorbt12, --------------------------')\nt1 = time.time()\n\n# First i run the solve function to find the key then cal the ascii sum\n\nimport urllib.request\n\ndef url_to_txt():\n response = urllib.request.urlopen('https://projecteuler.net/project/resources/p059_cipher.txt')\n data = response.read()\n txt = data.decode('utf-8')\n return txt\n\n\ndef ascii_to_string(txt):\n new_txt = ''\n for c in txt.split(','):\n new_txt += chr(int(c))\n return new_txt\n\n\ndef decrypt(msg, key):\n return ''.join([chr(ord(a) ^ ord(b)) for (a, b) in zip(msg, key * len(msg))])\n\n\ndef solve():\n alphabet = list(map(chr, range(97, 123)))\n txt = ascii_to_string(url_to_txt())\n for c1 in alphabet:\n for c2 in alphabet:\n for c3 in alphabet:\n key = c1 + c2 + c3\n msg = decrypt(txt, key)\n if \"the\" in msg and \"be\" in msg and \"to\" in msg and \"of\" in msg and 'and' in msg:\n print(msg)\n print(key)\n\n\ndef ascii_sum(msg, key):\n string_txt = decrypt(msg, key)\n ascii_list = [ord(c) for c in string_txt]\n s = 0\n for i in ascii_list:\n s += i\n print(s)\n\n\n# solve()\nascii_sum(ascii_to_string(url_to_txt()), 'god')\n\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n') # Completed in : 694.039822 ms\n\nprint('\\n--------------------------SOLUTION 3, VERY VERY FAST , chiong, Australia --------------------------')\nt1 = time.time()\n\n# This is fun. And I have broken my first encrypted message!!!\n\n\nimport random\nimport re\n\n\"\"\"\nq059\nNote:\n1. Information given:\n a. 3 letter key\n b. key in [a-z]\n c. key is repeated cyclically\nAlgorithm:\n1. Define list english_pattern of english word letters together with\n possible punctuation.\n2. For each encoded[0::KEY_LEN] find first letter key such that all\n decoded[0::KEY_LEN] are in english_pattern.\n3. Repeat step 2 for decoded[1::KEY_LEN] for second letter key, and so on.\n\nAlternate algorithm (without assuming key candidate)\n1. Define list english_pattern of english word letters together with\n possible punctuation.\n2. 
Get key[i=0] candidates using key_cand[i=0] = encoded[i=0] ^ each letter in english_pattern.\n3. For each candidate of key_cand[i=0],\n a. find decoded[KEY_LEN+0::KEY_LEN] = (key_cand[i=0] ^ encoded[KEY_LEN+0::KEY_LEN]).\n b. if any decoded[KEY_LEN+0::KEY_LEN] not in english_pattern, try next key_cand[i=0]\n and repeat Step 3a and 3b.\n c. Correct key[i=0] is given by key_cand[i=0] such that all decoded[KEY_LEN+0::KEY_LEN] are\n in english_pattern\n4. Continue for key[i=1], ..., key[n] by repeating Step 2 to 3 for each key[i].\n\"\"\"\n\nKEY_LEN = 3\nKEY_CANDIDATE = \"abcdefghijklmnopqrstuvwxyz\"\nenglish_pattern = r\"[a-zA-Z0-9 ,.'\\\"!?();:]\"\n\nwith open(filename, \"r\") as f:\n txt = f.read()\n txt_list = txt.split(\",\")\n encoded_list = [int(c) for c in txt_list]\n\ntic = time.time()\nregex = re.compile(english_pattern)\n\nkey_list = [ord(c) for c in KEY_CANDIDATE]\nrandom.shuffle(key_list)\nkey = [0 for _ in range(KEY_LEN)]\ndecoded_list = list(encoded_list)\nfor i in range(KEY_LEN):\n for j in key_list:\n k = i\n while k < len(encoded_list):\n decoded_list[k] = encoded_list[k] ^ j\n\n kkk = chr(decoded_list[k])\n\n if not regex.match(str(chr(decoded_list[k]))):\n break\n\n k += KEY_LEN\n else:\n key[i] = j\n break\n\nres = sum(decoded_list)\n\n\ndata_str = \"\".join([str(chr(c)) for c in decoded_list])\nkey_str = \"\".join([str(chr(c)) for c in key])\n\nprint ('Result: ', res ,'\\n' ,key_str, '\\n' ,data_str )\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n') # Completed in : 8.000374 ms\n\nprint('\\n--------------------------SOLUTION 4, froycard, Venezuela --------------------------')\nt1 = time.time()\n\ntext= ASCII_ciphered\ncode=[]\n\nfor i in range(103,123):\n for j in range(97,123):\n for k in range(97,123):\n code.append([i,j,k])\n\nfor k in code:\n back=[text[i]^k[i%3] for i in range(len(text))]\n outp = ''.join([chr(i) for i in back])\n if 'The ' in outp or 'the ' in outp:\n outp = ''.join([chr(i) for i in back])\n print (k, outp)\n break\nprint (sum([ord(i) for i in outp]))\nprint (\"DONE\")\n\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\nprint('\\n--------------------------SOLUTION 5, VERY VERY INTERESTING, ,dragonegghead1 , USA --------------------------')\nt1 = time.time()\n\n# One thing I knew was that there were very probably going to be a lot of spaces.\n# So I worked from that logic and took it from there:\n\nimport csv\nimport itertools\n\n#Load the values\nwith open(filename , newline='') as a:\n\tvalues = [int(i) for i in list(csv.reader(a))[0]]\n\n#Spaces should be the most numerous character.\nspaces = sorted(set(values),key=lambda x: values.count(x))[-3:]\n\n#Build an array of letters.\nletters = []\nfor i in range(ord(\"a\"),ord(\"z\")+1):\n\tif ord(\" \") in [i^x for x in spaces]:\n\t\tletters.append(i)\n\nif len(letters) != 3:\n\tprint(\"Something's wrong with your logic. 
Take another crack at it.\")\n\nelse:\n\tfor possibility in itertools.permutations(letters):\n\t\tmessage = list(map(lambda x: x[0]^x[1], zip(values, itertools.cycle(possibility))))\n\t\ttotal = sum(message)\n\t\tprint(possibility)\n\t\tprint(total)\n\t\tprint(''.join(chr(i) for i in message))\n\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\n\nprint('\\n--------------------------SOLUTION 6, Avi Levy, USA --------------------------')\nt1 = time.time()\n\nfrom statistics import mode\n\nmessage = [int(num) for line in open(filename) for num in line.split(',')]\n\nsize = 3\nm = len(message)\ncipher = [mode([message[j] for j in range(i, m-1, size)]) ^ ord(' ') for i in range(size)]\n\nprint(sum(cipher[i % size] ^ message[i] for i in range(m)))\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\n\n\nprint('\\n--------------------------SOLUTION 6, Sandamnit, USA --------------------------')\nt1 = time.time()\nimport operator\n\nkey = []\nwith open(filename, 'rt') as handle:\n # read the cipher\n cipher = handle.read().split(\",\")\n cipher = [ int(x) for x in cipher ]\n\n for n in range(3):\n # initialize counts for cipher values at positions == n (mod 3)\n counts = [0]*256\n for x in range(len(cipher[n::3])):\n counts[cipher[n::3][x]] += 1\n\n # recover the most frequent cipher character for the n-th key value\n # this is expected to be the space character (ASCII value == 32)\n k, _ = max(enumerate(counts), key=operator.itemgetter(1))\n\n # xor with 32 to recover n-th key value\n key.append(k ^ 32)\n\nprint(sum([ cipher[n] ^ key[n%3] for n in range(len(cipher)) ]))\n\nt2 = time.time()\nprint('\\nCompleted in :', round((t2-t1)*1000,6), 'ms\\n\\n')\n\n\n\n","sub_path":"Project EULER/pb059 XOR decryption.py","file_name":"pb059 XOR decryption.py","file_ext":"py","file_size_in_byte":13190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"7649302","text":"import pyglet\nfrom . 
import util\n\n\nclass PhysicalObject(pyglet.sprite.Sprite):\n \"\"\"Adds physical properties to sprites\"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PhysicalObject, self).__init__(*args, **kwargs)\n\n # give velocity to the objects\n self.velocity_x, self.velocity_y = 0.0, 0.0\n\n # allows objects to die\n self.dead = False\n\n # allow new objects to spawn\n self.new_objects = []\n\n # Flag to toggle collision\n self.reacts_to_bullets = True\n self.is_bullet = False\n\n # tell the game about any event handlers\n # kb/m inputs\n self.event_handlers = []\n\n def update(self, dt):\n \"\"\"Need this to update every frame\"\"\"\n self.x += self.velocity_x * dt\n self.y += self.velocity_y * dt\n\n self.check_bounds()\n\n def check_bounds(self):\n min_x = -self.image.width / 2\n min_y = -self.image.height / 2\n max_x = 800 + self.image.width / 2\n max_y = 600 + self.image.height / 2\n\n if self.x < min_x:\n self.x = max_x\n elif self.x > max_x:\n self.x = min_x\n\n if self.y < min_y:\n self.y = max_y\n elif self.y > max_y:\n self.y = min_y\n\n def collides_with(self, other_object):\n \"\"\"determine if objects have collided\"\"\"\n if not self.reacts_to_bullets and other_object.is_bullet:\n return False\n if self.is_bullet and not other_object.reacts_to_bullets:\n return False\n\n # calculate distance between object centers for collision purposes\n collision_distance = self.image.width / 2 + other_object.image.width / 2\n\n # get the actual distance\n actual_distance = util.distance(self.position, other_object.position)\n\n return actual_distance <= collision_distance\n\n def handle_collision_with(self, other_object):\n \"\"\"process collision event\"\"\"\n if other_object.__class__ is not self.__class__:\n self.dead = True\n","sub_path":"pytheroid/version4/game_files/physicalobject.py","file_name":"physicalobject.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"339298441","text":"#!/usr/bin/env python\n#\n# test_embed.py -\n#\n# Author: Paul McCarthy \n#\n\nimport gc\nimport os.path as op\n\nimport wx\n\nimport fsl.data.image as fslimage\nimport fsl.utils.idle as idle\nimport fsleyes.main as fslmain\n\nfrom tests import simclick\n\n\ndatadir = op.join(op.dirname(__file__), 'testdata')\n\n\ndef test_embed():\n\n gc.collect()\n idle.idleReset()\n\n app = wx.App()\n frame = [wx.Frame(None)]\n panel = wx.Panel(frame[0])\n btn = wx.Button(panel)\n btn.SetLabel('Click to open FSLeyes')\n fsizer = wx.BoxSizer(wx.VERTICAL)\n frame[0].SetSizer(fsizer)\n fsizer.Add(panel, flag=wx.EXPAND)\n\n psizer = wx.BoxSizer(wx.VERTICAL)\n panel.SetSizer(psizer)\n psizer.Add(btn, flag=wx.EXPAND)\n\n sim = wx.UIActionSimulator()\n ncalls = [0]\n\n def finish():\n frame[0].Close()\n app.ExitMainLoop()\n\n def embedded(overlayList, displayCtx, fframe):\n\n print('Embedded call', ncalls[0])\n\n img = fslimage.Image(op.join(datadir, '3d'))\n fframe.addOrthoPanel()\n overlayList.append(img)\n fframe.Show()\n ncalls[0] += 1\n\n wx.CallLater(1500, fframe.Close)\n fframe = None\n if ncalls[0] < 4:\n wx.CallLater(2500, simclick, sim, btn)\n else:\n print('Done - closing')\n wx.CallLater(1500, finish)\n\n def open_fsleyes(ev):\n fslmain.embed(frame[0],\n callback=embedded,\n menu=False,\n save=False)\n\n btn.Bind(wx.EVT_BUTTON, open_fsleyes)\n\n wx.CallLater(1000, simclick, sim, btn)\n\n frame[0].Show()\n app.MainLoop()\n\n assert ncalls[0] == 
4\n","sub_path":"tests/test_embed.py","file_name":"test_embed.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"147643775","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib import messages\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.views.generic import (\n DetailView,\n ListView,\n View,\n CreateView\n )\n\nfrom .models import Lugar, Region, Provincia, Comuna\nfrom tocata.models import Tocata\nfrom propuestaslugar.models import LugaresTocata\n\nfrom .forms import (\n CrearLugarForm,\n RegionForm,\n ComunaForm,\n ActualizaLugarForm,\n BorrarLugarForm\n )\n\nfrom toca.parametros import parToca\n\nfrom toca.mixins import NextUrlMixin, RequestFormAttachMixin\n\n# Create your views here.\n\nclass MisLugaresListView(LoginRequiredMixin, ListView):\n\n template_name = 'lugar/mislugares.html'\n paginate_by = 12\n ordering = ['-fecha_crea']\n\n def get_queryset(self, *args, **kwargs):\n request = self.request\n mislugares = Lugar.objects.by_request(request)\n\n return mislugares\n\nclass LugarCreateView(NextUrlMixin, RequestFormAttachMixin, LoginRequiredMixin, CreateView):\n form_class = CrearLugarForm\n template_name = 'lugar/agregarlugar.html'\n success_url = '/lugares/mislugares'\n\n def form_valid(self, form):\n request = self.request\n msg = 'Dirección creado exitosamente'\n messages.success(request, msg)\n return super().form_valid(form)\n\n def form_invalid(self, form):\n request = self.request\n msg = 'Error al crear dirección'\n messages.error(request, msg)\n return super().form_invalid(form)\n\nclass ActualizaLugarView(LoginRequiredMixin, View):\n\n form_class = ActualizaLugarForm\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request, request.POST or None)\n if form.is_valid():\n lugar = form.cleaned_data['lugar']\n descripción = form.cleaned_data['descripción']\n lugar.update_descripción(descripción)\n\n return redirect('lugar:mislugares')\n\nclass BorrarLugarView(LoginRequiredMixin, View):\n\n form_class = BorrarLugarForm\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request, request.POST or None)\n if form.is_valid():\n lugar = form.cleaned_data['lugar']\n lugar.borrar()\n\n return redirect('lugar:mislugares')\n\n@login_required(login_url='index')\ndef carga_comunas_agregar(request):\n\n region_id = request.GET.get('region')\n comunas = Comuna.objects.filter(region=region_id).exclude(nombre='Todas').order_by('nombre')\n context = {\n 'comunas_reg': comunas,\n }\n return render(request, 'lugar/comuna_dropdown_list_options_agregar.html', context)\n\n@login_required(login_url='index')\ndef carga_comunas_actualizar(request):\n\n region_id = request.GET.get('region')\n comuna_id = request.GET.get('comuna')\n comunas = Comuna.objects.filter(region=region_id).exclude(nombre='Todas').order_by('nombre')\n\n if comuna_id.isdigit():\n context = {\n 'comunas_reg': comunas,\n 'comuna_id': int(comuna_id),\n }\n else:\n context = {\n 'comunas_reg': comunas,\n 'comuna_id': comunas.first(),\n }\n\n return render(request, 'lugar/comuna_dropdown_list_options_actualizar.html', context)\n","sub_path":"lugar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
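+{"seq_id":"editor-sketch-1","text":"# Editor-added illustrative sketch -- a hypothetical record, not part of the\n# original crawl; seq_id, sub_path and all names below are invented. It\n# restates the circle-vs-circle hit test that PhysicalObject.collides_with\n# (above) relies on: two sprites collide when the distance between their\n# centers is at most the sum of their radii (half-widths).\nimport math\n\n\ndef distance(a, b):\n    # Euclidean distance between two (x, y) points.\n    return math.hypot(a[0] - b[0], a[1] - b[1])\n\n\ndef circles_collide(center_a, radius_a, center_b, radius_b):\n    # Collision when the center distance does not exceed the combined radii.\n    return distance(center_a, center_b) <= radius_a + radius_b\n\n\nif __name__ == '__main__':\n    assert circles_collide((0, 0), 16, (20, 0), 16)      # 20 <= 32: overlap\n    assert not circles_collide((0, 0), 16, (40, 0), 16)  # 40 > 32: miss\n","sub_path":"examples/editor_sketch_collision.py","file_name":"editor_sketch_collision.py","file_ext":"py","file_size_in_byte":0,"program_lang":"python","lang":"en","doc_type":"code","dataset":"editor-sketch","pt":"57"}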
+{"seq_id":"587979557","text":"\n'''\nName: Arrange anagram.\n\nAuthor: Ranaji Krishna.\n\nNotes:\nWrite a method to sort an array of strings so that all the anagrams are next to each\nother.\n\n'''\nfrom myLib import *\n\ndef grp_ana(tmp_arr):\n\tdict_grp = {}\n\t\n\tfor i in xrange(0,len(tmp_arr)):\n\t\tdict_grp.setdefault(''.join(sorted(map(lambda c:c, tmp_arr[i]))), []).append(tmp_arr[i])\n\n\treturn(dict_grp.values())\n\t\ndef main(argv = None):\n\n\tstr_arr = ['vini','lila','ranaji','ilal','invi','duncan','candun',\\\n\t\t 'lali','jirana','llia','dncuan']\n\n\tprint(grp_ana(str_arr))\n\treturn(0)\n\n\nif __name__ =='__main__':\n\tstatus = main()\n\tsys.exit(status)\n\n","sub_path":"codes_algo/code_python/cracks/srt_search/order_anagram.py","file_name":"order_anagram.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"17055125","text":"from django.shortcuts import reverse\nfrom rest_framework.test import APITestCase\nfrom ng.models import Contact\nfrom django.contrib.auth.models import User\n\n\nclass TestContactApi(APITestCase):\n def setUp(self):\n # create user for authentication as our api is taken based\n self.user = User.objects.create(username=\"test\", password=\"test\") \n # create Contact\n self.contact = Contact(userId=254, name=\"The Space Between Us\", phone=2017, email='doe@f.com')\n self.contact.save()\n\n def test_contact_creation(self):\n # authenticate before create\n self.client.force_authenticate(user=self.user)\n response = self.client.post(reverse('getAndPost'), {\n 'userId': 253,\n 'name': 'Bee Movie',\n 'phone': 2007,\n 'email': 'ad@kjfd.com'\n })\n self.assertEqual(Contact.objects.count(), 2)\n self.assertEqual(201, response.status_code)\n\n def test_getting_contact(self):\n response = self.client.get(reverse('getAndPost'), format=\"json\")\n self.assertEqual(len(response.data), 1)\n\n def test_updating_contact(self):\n #authenticate before update\n self.client.force_authenticate(user=self.user)\n\n response = self.client.put(reverse('updateAndDelete', kwargs={'pk': 1}), {\n 'userId':24,\n 'name': 'The Space Between Us updated',\n 'phone': 2017,\n 'email': 'df@jfl.com'\n }, format=\"json\")\n\n # check info returned has the update\n self.assertEqual('The Space Between Us updated', response.data['name'])\n\n def test_deleting_contact(self):\n # authenticate before delete\n self.client.force_authenticate(user=self.user)\n\n response = self.client.delete(reverse('updateAndDelete', kwargs={'pk': 1}))\n self.assertEqual(204, response.status_code)","sub_path":"ng/tests/test_api_view.py","file_name":"test_api_view.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"69002059","text":"import requests\nimport urllib.request\nimport chardet\nfrom bs4 import BeautifulSoup\nurl=\"http://www.shicimingju.com/book/sanguoyanyi.html\" # 要爬取的网络地址\nheaders={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'}\n#menuCode=urllib.request.urlopen(url).read() # 将网页源代码赋予menuCode\nmenuCode=requests.get(url,headers=headers)\nmenuCode.encoding='utf-8'\nsoup=BeautifulSoup(menuCode.text,'lxml')\n\nmenu=soup.find_all(id='mulu')#mmenu=soup.select('#mulu')\nvalues=','.join(str(x)for x in menu)\nsoup2=BeautifulSoup(values,'lxml')\nsoup2=soup2.ul\n\nf=open('我的三国演义.txt','a',encoding='utf-8')\n\n# 
soup2.contents[1].string #章节目录\n# soup2.contents[1].a['href']#章节对应链接\nbookMenu=[]\nbookMenuUrl=[]\nfor i in range(1,len(soup2.contents)-1):\n bookMenu.append(soup2.contents[i].string)\n bookMenuUrl.append(soup2.contents[i].a['href']) \ntop=\"http://www.shicimingju.com\"#bookMenu,bookMenuUrl\n#到此外层循环结束,即章节循环以及对应的子页网址循环,接着要循环子页网址内的内容\n#len(bookMenuUrl)==120\n\n#注意看注意看,这里的第二个for循环并不是子循环,是同时进行的,之前的爬取招聘类网址信息的时候是否可以参考\nfor i in range(0,len(bookMenuUrl)): \n href=top+bookMenuUrl[i]\n html=requests.get(href,headers=headers)\n html.encoding='utf-8'\n soup3=BeautifulSoup(html.text,'html.parser')\n soup4=soup3.find_all(id='con2')\n soup4=','.join(str(x)for x in soup4)\n soup5=BeautifulSoup(soup4,'html.parser')\n soup5=soup5.br\n soup5\n #len(soup5)=20+\n f.write(bookMenu[i])\n for j in range(0,len(soup5)):\n ctext=soup5.contents[j].string\n f.write(ctext)\nf.close()\n","sub_path":"爬取三国演义.py","file_name":"爬取三国演义.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"223924596","text":"import os\nimport time\n\nt0 = time.time()\n\nlr_list = [0.00001, 0.0001, 0.005, 0.01, 0.05, 0.1]\nfor i in range(len(lr_list)):\n lr = lr_list[i]\n os.system(f\"python cs285/scripts/run_hw3_dqn.py --env_name LunarLander-v3 --lander_final_lr {lr} --exp_name q3_hparam{i+1}\")\n\n\nt1 = time.time()\n\nprint(\"Total experiment time elapsed: \", t1 - t0)","sub_path":"hw3/cs285/scripts/experiment2.py","file_name":"experiment2.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"523683891","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom app import create_app\nfrom models import setup_db, Movies, Actors\n\nclass CastingAgencyTestCase(unittest.TestCase):\n # Class represents the Casting Agency Test Case.\n\n def setUp(self):\n # Define test variables & initialise the app.\n\n self.ASSISTANT_TOKEN = os.environ['ASSISTANT_TOKEN']\n self.DIRECTOR_TOKEN = os.environ['DIRECTOR_TOKEN']\n self.PRODUCER_TOKEN = os.environ['PRODUCER_TOKEN']\n\n self.token_assistant = {'Content-Type': 'application/json', 'Authorization': 'Bearer {}'.format(self.ASSISTANT_TOKEN)}\n self.token_director = {'Content-Type': 'application/json', 'Authorization': 'Bearer {}'.format(self.DIRECTOR_TOKEN)}\n self.token_producer = {'Content-Type': 'application/json', 'Authorization': 'Bearer {}'.format(self.PRODUCER_TOKEN)}\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"agency_test\"\n self.database_path = \"postgresql://{}:{}@{}/{}\".format('postgres', 'ph33rth33v1l','localhost:5432', self.database_name)\n\n setup_db(self.app, self.database_path)\n\n # Binds the app to the current context.\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # Create all tables.\n self.db.create_all()\n\n def tearDown(self):\n # Executed after each test.\n pass\n\n # Movie endpoints.\n # Test created for get_movies.\n def test_get_movies(self):\n movie = Movies(title='Ghost in the Shell', release_date='08-12-1995')\n movie.insert()\n res = self.client().get('/movies', headers=self.token_producer)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['movies'])\n \n # Test created for get_movies failure.\n def test_get_movies_fail(self):\n res = self.client().get('/movies')\n data = 
json.loads(res.data)\n self.assertEqual(res.status_code, 401)\n\n # Test created for post_movie.\n def test_post_movie(self):\n res = self.client().post('/movies', headers=self.token_producer, json={'title': 'The Matrix', 'release_date': '11-06-1999'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 201)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['movie'])\n \n # Test created for post_movie failure.\n def test_post_movie_failure(self):\n res = self.client().post('/movies', headers=self.token_assistant, json={'title': 'The Matrix', 'release_date': '11-06-1999'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n \n # Test created for edit_movie\n def test_edit_movie(self):\n movie = Movies(title='Star Wars', release_date='27-12-77')\n movie.insert()\n movie_id = movie.id\n res = self.client().patch('/movies/'+str(movie_id) + '', headers=self.token_director, json={'title': 'Star Wars: A New Hope', 'release_date': '27-12-1977'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['movie'])\n \n # Test created for edit_movie failure.\n def test_edit_movie_failure(self):\n movie = Movies(title='Star Wars: The Empire Strikes Back', release_date='20-05-1980')\n movie.insert()\n movie_id = movie.id\n res = self.client().patch('/movies/'+str(movie_id) + '', headers=self.token_assistant, json={'title': 'Star Wars: Return of the Jedi', 'release_date': '02-06-1983'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n \n # Test created for delete_movie.\n def test_delete_movie(self):\n movie = Movies(title='Akira', release_date='25-01-1988')\n movie.insert()\n movie_id = movie.id\n res = self.client().delete('/movies/'+str(movie_id) + '', headers=self.token_producer)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['delete'])\n \n # Test for delete_movie failure.\n def test_delete_movie_failure(self):\n res = self.client().delete('/movies/1234', headers=self.token_producer)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n\n # Actor Endpoints\n # Test created for get_actors.\n def test_get_actors(self):\n actor = Actors(name='Keanu Reeves', age=57, gender='Male')\n actor.insert()\n res = self.client().get('/actors', headers=self.token_producer)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['actors'])\n \n # Test created for get_actors failure.\n def test_get_actors_fail(self):\n res = self.client().get('/actors')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 401)\n\n # Test created for post_actor.\n def test_post_actor(self):\n res = self.client().post('/actors', headers=self.token_producer, json={'name': 'Hugo Weaving', 'age': 61, 'gender': 'Male'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 201)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['actor'])\n \n # Test created for post_actor failure.\n def test_post_actor_failure(self):\n res = self.client().post('/actors', headers=self.token_assistant, json={'name': 'Hugo Weaving', 'age': 61, 'gender': 'Male'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n 
self.assertEqual(data['success'], False)\n \n # Test created for edit_actor\n def test_edit_actor(self):\n actor = Actors(name='name Johansson', age=3, gender='Female')\n actor.insert()\n actor_id = actor.id\n res = self.client().patch('/actors/'+str(actor_id) + '', headers=self.token_director, json={'name': 'Scarlett Johansson', 'age': 36, 'gender': 'Female'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['actor'])\n \n # Test created for edit_actor failure.\n def test_edit_actor_failure(self):\n actor = Actors(name='Harrison Ford', age=79, gender='male')\n actor.insert()\n actor_id = actor.id\n res = self.client().patch('/actors/'+str(actor_id) + '', headers=self.token_assistant, json={'name': 'Harry Ford', 'age': 79, 'gender': 'Female'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 403)\n self.assertEqual(data['success'], False)\n \n # Test created for delete_actor.\n def test_delete_actor(self):\n actor = Actors(name='Brad Pitt', age=57, gender='Male')\n actor.insert()\n actor_id = actor.id\n res = self.client().delete('/actors/'+str(actor_id) + '', headers=self.token_producer)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['delete'])\n \n # Test for delete_actor failure.\n def test_delete_actor_failure(self):\n res = self.client().delete('/actors/1234', headers=self.token_producer)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":7831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"61992994","text":"#!/usr/bin/env python\n\"\"\"\nScript used to download test files from the GEMINI archive and save them inside\nthe path stored in the TEST_PATH environment variable. Before running it, make\nsure that you set this path using the following command:\n\n $ export TEST_PATH=\"/path/to/my/test/data/\"\n $ echo $TEST_PATH \n /path/to/my/test/data/\n\nThe test data is listed inside the global variable FILE_WITH_TEST_FILES. Each\nrow has one file. Each file can be preceeded with a subfolder. This is useful\nto isolate tests.\n\"\"\"\n\nimport os\nimport subprocess\nimport sys\n\n\nFILE_WITH_TEST_FILES = '.jenkins/test_files.txt'\nURL = u'https://archive.gemini.edu/file/'\n\ntry:\n TEST_PATH = os.environ['TEST_PATH']\n\nexcept KeyError as err:\n\n print('\\n This script needs the environment variable TEST_PATH'\n '\\n Please, add is using the following command: ' \\\n '\\n $ export TEST_PATH=\"/my/test/path/\"'\n '\\n and run again. 
Leaving now.'\n '\\n ')\n\n sys.exit(1)\n\n\ndef download_test_data():\n\n create_test_folder_if_does_not_exist()\n download_non_existing_test_files()\n\n\ndef create_test_folder_if_does_not_exist():\n\n print('')\n if os.path.exists(TEST_PATH):\n print(' Skip creation of existing folder: {}'.format(TEST_PATH))\n else:\n print(' Create non-existing test folder: {}'.format(TEST_PATH))\n os.makedirs(TEST_PATH)\n\n\ndef download_non_existing_test_files():\n\n with open(FILE_WITH_TEST_FILES, 'r') as list_of_files:\n\n print('')\n\n for _filename in list_of_files.readlines():\n\n current_file = os.path.join(TEST_PATH, _filename).strip()\n\n if len(_filename.strip()) == 0:\n print('')\n continue\n\n if _filename.startswith('#'):\n print(\" {}\".format(_filename.strip()))\n continue\n\n if os.path.exists(current_file):\n print(' Skip existing file: {:s}'.format(current_file))\n\n else:\n print(' Download missing file: {:s}'.format(current_file))\n _path, _file = os.path.split(current_file)\n\n if not os.path.exists(_path):\n os.makedirs(_path)\n\n try:\n subprocess.run(['curl', '--silent', URL + _file, '--output',\n current_file], check=True)\n except subprocess.CalledProcessError:\n print(' Failed to download file: {}'.format(current_file))\n\n print('')\n\n\nif __name__ == \"__main__\":\n download_test_data()\n","sub_path":".jenkins/download_test_data.py","file_name":"download_test_data.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167067368","text":"def Propfair(GEvector,Evector,T):\r\n tc=5;\r\n Dvector=[GEvector/T for GEvector,T in zip(GEvector,T)] #metric vector for decision making\r\n MAX=Dvector.index(max(Dvector))\r\n SClist=[0]*len(GEvector) #refresh the Schedule list\r\n SClist[MAX]=1 #The Data Center which is selected\r\n print(SClist)\r\n for i in range(0,len(GEvector)):\r\n if SClist[i]==1:\r\n T[i]=(1-(1/tc))*T[i]+((1/tc))*GEvector[i]\r\n else:\r\n T[i]=(1-(1/tc))*T[i]\r\n \r\n return SClist\r\n","sub_path":"copy/PropFair.py","file_name":"PropFair.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"523149293","text":"\"\"\"backend URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
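+{"seq_id":"editor-sketch-2","text":"# Editor-added illustrative sketch -- a hypothetical record, not part of the\n# original crawl; seq_id, sub_path and the sample rates below are invented.\n# A minimal worked example of the proportional-fair rule that Propfair (above)\n# implements: each step serves the user maximising instantaneous_rate /\n# average_throughput, then updates the averages with an EWMA of window tc\n# (the winner mixes in its achieved rate, the others only decay).\ntc = 5.0\nrates = [[10.0, 2.0], [1.0, 8.0], [9.0, 3.0]]  # achievable rate per user, per step\nT = [1.0, 1.0]  # initial average throughput per user\nfor ge in rates:\n    winner = max(range(len(ge)), key=lambda i: ge[i] / T[i])\n    T = [(1 - 1 / tc) * t + (1 / tc) * g if i == winner else (1 - 1 / tc) * t\n         for i, (t, g) in enumerate(zip(T, ge))]\n    print('served user', winner, 'T =', T)\n","sub_path":"examples/editor_sketch_propfair.py","file_name":"editor_sketch_propfair.py","file_ext":"py","file_size_in_byte":0,"program_lang":"python","lang":"en","doc_type":"code","dataset":"editor-sketch","pt":"57"}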
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom student.models import StudentBasic,Tuition,StudentWechat,StudentExamExtra,StudentExam,StudentCertification,StudentTextbook,Total as StudentTotal\nfrom family.models import FamilyBasic,FamilyWechat,FamilyTuition,FamilyOnduty,FamilyTextbook,FamilyCertification,Result,ResultExtra,Total as FamilyTotal\nimport xadmin\nfrom django.shortcuts import HttpResponse\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\ndef generate_total(request):\n q = StudentBasic.objects.all()\n p = FamilyBasic.objects.all()\n for i in p:\n FamilyTotal.objects.create(family=i)\n for j in q:\n StudentTotal.objects.create(student=j)\n\n return HttpResponse(\"
<h1>迁移成功</h1>\")\n\ndef generate_tuition(requests):\n    q = Tuition.objects.all()\n    p = FamilyTuition.objects.all()\n    for i in p:\n        if i.fee_date is None:\n            i.fee_date = '空'\n            i.save()\n    for j in q:\n        if j.fee_date is None:\n            j.fee_date = '空'\n            j.save()\n    return HttpResponse(\"<h1>迁移完成</h1>\")\n\ndef generate_class(requests):\n    q = StudentBasic.objects.all()\n    for i in q:\n        t = Tuition.objects.get(relate_student=i)\n        t.relate_class = i.stu_class\n        t.save()\n        w = StudentWechat.objects.get(relate_student=i)\n        w.relate_class = i.stu_class\n        w.save()\n        e = StudentExam.objects.get(relate_student=i)\n        e.relate_class = i.stu_class\n        e.save()\n        ee = StudentExamExtra.objects.get(relate_student=i)\n        ee.relate_class = i.stu_class\n        ee.save()\n        c = StudentCertification.objects.get(relate_student=i)\n        c.relate_class = i.stu_class\n        c.save()\n        tb = StudentTextbook.objects.get(relate_student=i)\n        tb.relate_class = i.stu_class\n        tb.save()\n    p = FamilyBasic.objects.all()\n    for i in p:\n        t = FamilyTuition.objects.get(relate_family=i)\n        t.relate_class = i.fam_class\n        t.save()\n        w = FamilyWechat.objects.get(relate_family=i)\n        w.relate_class = i.fam_class\n        w.save()\n        e = Result.objects.get(relate_family=i)\n        e.relate_class = i.fam_class\n        e.save()\n        ee = ResultExtra.objects.get(relate_family=i)\n        ee.relate_class = i.fam_class\n        ee.save()\n        c = FamilyCertification.objects.get(relate_family=i)\n        c.relate_class = i.fam_class\n        c.save()\n        tb = FamilyTextbook.objects.get(relate_family=i)\n        tb.relate_class = i.fam_class\n        tb.save()\n    return HttpResponse(\"<h1>迁移完成</h1>
\")\n\nurlpatterns = [\n path('file/', admin.site.urls),\n path('',xadmin.site.urls),\n path('generate/',generate_total),\n path('generate_class',generate_class),\n path('generate_tuition', generate_tuition)\n]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)\n\nurlpatterns += staticfiles_urlpatterns()\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"157173423","text":"from fin_data_analyser.DataAnalyser import *\nfrom fin_manager_model.FrameworkHelper import *\nfrom fin_manager_model.MutualFunds import MutualFunds\nfrom fin_manager_model.ReportingEngine import ReportingEngine\n\n\nclass MFValue(DataRule):\n def __init__(self, reporting_currency='INR'):\n super(MFValue, self).__init__()\n self.reporting_currency = reporting_currency\n self.calculation_date = get_todays_date()\n\n def execute(self):\n re = ReportingEngine()\n re.reporting_currency = self.reporting_currency\n print('Fixed deposits matured before {}'.format(self.calculation_date))\n for ins in MutualFunds.all_instances:\n if isinstance(ins, MutualFunds) and ins.active:\n self.amount += ins.calculate_value(reporting_engine=re, date=self.calculation_date)\n print(ins.mf_name, ' ', ins.unit, ' ', ins.calculate_value(reporting_engine=re, date=self.calculation_date))\n\n print('Total {} {}'.format(self.amount, self.reporting_currency))","sub_path":"fin_data_analyser/MFValue.py","file_name":"MFValue.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"211278217","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nThis module contains functionality for dealing with the Traffic Ops ReST API\n\"\"\"\n\nimport datetime\nimport typing\nimport logging\nimport requests\n\nfrom . 
import packaging\n\n#: Caches update statuses mapped by hostnames\nCACHED_UPDATE_STATUS = {}\n\n#: Caches the names of statuses supported by Traffic Ops\nCACHED_STATUSES = []\n\n#: Maps Traffic Ops alert levels to logging levels\nAPI_LOGGERS = {\"error\": lambda x: logging.error(\"Traffic Ops API alert: %s\", x),\n \"warning\": lambda x: logging.warning(\"Traffic Ops API alert: %s\", x),\n \"info\": lambda x: logging.info(\"Traffic Ops API alert: %s\", x),\n \"success\": lambda x: logging.info(\"Traffic Ops API alert: %s\", x)}\n\nclass ServerInfo():\n\t\"\"\"\n\tHolds information about a server, as returned by the Traffic Ops API\n\t``api/1.x/servers//configfiles/ats`` endpoint\n\t\"\"\"\n\n\tcdnId = -1 #: A database primary key for the CDN to which this server is assigned\n\tcdnName = \"\" #: The name of the CDN to which this server is assigned\n\tprofileName = \"\" #: The name of the profile in use by this server\n\tprofileId = -1 #: A database primary key for this server's profile's information\n\tserverId = -1 #: A database primary key for this server's information\n\tserverIpv4 = \"\" #: This server's IPv4 address\n\tserverName = \"\" #: This server's short hostname\n\tserverTcpPort = 80 #: The port on which the caching proxy of this server listens\n\ttoUrl = \"\" #: The Traffic Ops URL... not sure what that's for...\n\n\t#: This specifies the url of a reverse proxy that should be used for future requests to the\n\t#: Traffic Ops API - if present.\n\ttoRevProxyUrl = \"\"\n\n\tdef __init__(self, raw:dict):\n\t\t\"\"\"\n\t\tConstructs a server object out of some kind of raw API response\n\n\t\t:param raw: some kind of ungodly huge JSON object from one API endpoint or\n\t\t\tanother. Attempts will be made to resolve inconsistent naming accross\n\t\t\tendpoints.\n\t\t:raises ValueError: when the passed object doesn't have all required fields\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself.cdnId = raw[\"cdnId\"]\n\t\t\tself.cdnName = raw[\"cdnName\"]\n\t\t\tself.profileName = raw[\"profileName\"]\n\t\t\tself.profileId = raw[\"profileId\"]\n\t\t\tself.serverId = raw[\"serverId\"]\n\t\t\tself.serverIpv4 = raw[\"serverIpv4\"]\n\t\t\tself.serverName = raw[\"serverName\"]\n\t\t\tself.serverTcpPort = raw[\"serverTcpPort\"]\n\t\t\tself.toUrl = raw[\"toUrl\"]\n\n\t\t\t# This may or may not exist\n\t\t\tif \"toRevProxyUrl\" in raw:\n\t\t\t\tself.toRevProxyUrl = raw[\"toRevProxyUrl\"]\n\t\texcept (KeyError, TypeError) as e:\n\t\t\traise ValueError from e\n\n\n\tdef __repr__(self) -> str:\n\t\t\"\"\"\n\t\tImplements ``str(self)``\n\t\t\"\"\"\n\t\tout = \"Server(%s)\"\n\t\treturn out % ', '.join((\"%s=%r\" % (a, self.__getattribute__(a))\\\n\t\t for a in dir(self)\\\n\t\t if not a.startswith('_')))\n\n\tdef sanitize(self, fmt:str) -> str:\n\t\t\"\"\"\n\t\tImplements ``str.format(self)``\n\t\t\"\"\"\n\t\tfrom .configuration import HOSTNAME\n\t\tfmt = fmt.replace(\"__HOSTNAME__\", HOSTNAME[0])\n\t\tfmt = fmt.replace(\"__FULL_HOSTNAME__\", HOSTNAME[1])\n\t\tfmt = fmt.replace(\"__RETURN__\", '\\n')\n\t\tfmt = fmt.replace(\"__CACHE_IPV4__\", self.serverIpv4)\n\n\t\t# Don't ask me why, but the reference ORT implementation just strips these ones out\n\t\t# if the tcp port is 80.\n\t\treturn fmt.replace(\"__SERVER_TCP_PORT__\", str(self.serverTcpPort)\\\n\t\t if self.serverTcpPort != 80 else \"\")\n\ndef TOPost(uri:str, data:dict) -> str:\n\t\"\"\"\n\tPOSTs the passed data in a request to the specified API endpoint\n\n\t:param uri: The Traffic Ops URL-relative path to an API endpoint, e.g. 
if the intention is\n\t\tto post to ``https://TO_URL:TO_PORT/api/1.3/users``, this should just be\n\t\t``'api/1.3/users'``\n\n\t\t\t.. note:: This function will ensure the proper concatenation of the Traffic Ops URL\n\t\t\t\tto the request path; callers need not worry about whether the ``uri`` ought to\n\t\t\t\tbegin with a slash.\n\n\t:returns: The Traffic Ops server's response to the POST request - possibly empty - as a UTF-8\n\t\tstring\n\t:raises ConnectionError: when an error occurs trying to communicate with Traffic Ops\n\t\"\"\"\n\tfrom . import configuration as conf\n\n\turi = '/'.join((conf.TO_URL, uri.lstrip('/')))\n\tlogging.info(\"POSTing %r to %s\", data, uri)\n\n\ttry:\n\t\tresp = requests.post(uri, cookies=conf.getTOCookie(), verify=conf.VERIFY, data=data)\n\texcept (PermissionError, requests.exceptions.RequestException) as e:\n\t\traise ConnectionError from e\n\n\tlogging.debug(\"Raw response from Traffic Ops: %s\\n%s\\n%s\", resp, resp.headers, resp.content)\n\n\treturn resp.text\n\ndef getTOJSONResponse(uri:str) -> dict:\n\t\"\"\"\n\tA wrapper around :func:`traffic_ops_ort.utils.getJSONResponse` that handles cookies and\n\ttacks on the top-level Traffic Ops URL.\n\n\t:param uri: The Traffic Ops URL-relative path to a JSON API endpoint, e.g. if the intention\n\t\tis to get ``https://TO_URL:TO_PORT/api/1.3/ping``, this should just be ``'api/1.3/ping'``\n\n\t\t\t.. note:: This function will ensure the proper concatenation of the Traffic Ops URL\n\t\t\t\tto the request path; callers need not worry about whether the ``uri`` ought to\n\t\t\t\tbegin with a slash.\n\n\t:returns: The decoded JSON response as an object\n\n\t\t\t.. note:: If the API response containes a 'response' object, this function will\n\t\t\t\tonly return that object. Also, if the API response contains an 'alerts' object,\n\t\t\t\tthey will be logged appropriately\n\n\t:raises ConnectionError: when an error occurs trying to communicate with Traffic Ops\n\t:raises ValueError: when the request completes successfully, but the response body\n\t\tdoes not represent a JSON-encoded object.\n\t\"\"\"\n\tglobal API_LOGGERS\n\tfrom . import configuration as conf, utils\n\n\turi = '/'.join((conf.TO_URL, uri.lstrip('/')))\n\tlogging.info(\"Fetching Traffic Ops API response: %s\", uri)\n\n\tif datetime.datetime.now().timestamp() >= conf.TO_COOKIE.expires:\n\t\ttry:\n\t\t\tconf.getNewTOCookie()\n\t\texcept PermissionError as e:\n\t\t\traise ConnectionError from e\n\n\tresp = utils.getJSONResponse(uri,\n\t cookies = {conf.TO_COOKIE.name:conf.TO_COOKIE.value},\n\t verify = conf.VERIFY)\n\n\tif \"response\" in resp:\n\t\tif \"alerts\" in resp:\n\t\t\tfor alert in resp[\"alerts\"]:\n\t\t\t\tif \"level\" in alert:\n\t\t\t\t\tmsg = alert[\"text\"] if \"text\" in alert else \"Unkown\"\n\t\t\t\t\tAPI_LOGGERS[alert[\"level\"]](msg)\n\t\t\t\telif \"text\" in alert:\n\t\t\t\t\tlogging.warning(\"Traffic Ops API alert: %s\", alert[\"text\"])\n\t\t\t\t\tlogging.debug(\"Weird alert encountered: %r\", alert)\n\n\n\t\treturn resp[\"response\"]\n\n\treturn resp\n\ndef getUpdateStatus(host:str) -> dict:\n\t\"\"\"\n\tGets the update status of a server.\n\n\t.. note:: If the global :data:`CACHED_UPDATE_STATUS` cached response is set, this function will\n\t\tdefault to that object. 
If it is *not* set, then this function will set it.\n\n\t:param host: The (short) hostname of the server to query\n\t:raises ValueError: if ``host`` is not a :const:`str`\n\t:raises PermissionError: if a new cookie is required, but fails to be aquired\n\t:returns: An object representing the API's response\n\t\"\"\"\n\tglobal CACHED_UPDATE_STATUS\n\n\tlogging.info(\"Fetching update status for %s from Traffic Ops\", host)\n\tif not isinstance(host, str):\n\t\traise ValueError(\"First argument ('host') must be 'str', not '%s'\" % type(host))\n\n\tif host in CACHED_UPDATE_STATUS:\n\t\treturn CACHED_UPDATE_STATUS[host]\n\n\tCACHED_UPDATE_STATUS[host] = getTOJSONResponse(\"api/1.3/servers/%s/update_status\" % host)\n\n\treturn CACHED_UPDATE_STATUS[host]\n\ndef getMyStatus() -> str:\n\t\"\"\"\n\tFetches the status of this server as set in Traffic Ops\n\n\t:raises ConnectionError: if fetching the status fails\n\t:raises ValueError: if the :data:`traffic_ops_ort.configuration.HOSTNAME` is not properly set,\n\t\tor a weird value is stored in the global :data:`CACHED_UPDATE_STATUS` response cache.\n\t:returns: the name of the status to which this server is set in the Traffic Ops configuration\n\n\t.. note:: If the global :data:`CACHED_UPDATE_STATUS` cached response is set, this function will\n\t\tdefault to the status provided by that object.\n\t\"\"\"\n\tglobal CACHED_UPDATE_STATUS\n\tfrom .configuration import HOSTNAME\n\n\ttry:\n\t\tif HOSTNAME[0] in CACHED_UPDATE_STATUS:\n\t\t\tmyStatus = CACHED_UPDATE_STATUS[HOSTNAME[0]]\n\t\t\tif \"status\" in CACHED_UPDATE_STATUS[HOSTNAME[0]]:\n\t\t\t\treturn CACHED_UPDATE_STATUS[HOSTNAME[0]][\"status\"]\n\n\t\t\tlogging.warning(\"CACHED_UPDATE_STATUS possibly set improperly\")\n\t\t\tlogging.warning(\"clearing this server's cached entry!\")\n\t\t\tlogging.debug(\"value was %r\", myStatus)\n\t\t\tdel CACHED_UPDATE_STATUS[HOSTNAME[0]]\n\n\texcept (IndexError, KeyError) as e:\n\t\traise ValueError from e\n\n\tmyStatus = getUpdateStatus(HOSTNAME[0])\n\n\ttry:\n\t\treturn myStatus[0][\"status\"]\n\texcept (IndexError, KeyError) as e:\n\t\tlogging.error(\"Malformed response from Traffic Ops to update status request!\")\n\t\traise ConnectionError from e\n\ndef getStatuses() -> typing.Generator[str, None, None]:\n\t\"\"\"\n\tYields a successive list of statuses supported by Traffic Ops.\n\n\t.. 
note:: This is implemented by iterating the :data:`CACHED_STATUSES` global cache -\n\t\tfirst populating it if it is empty - and so the validity of its outputs\n\t\tdepends on the validity of the data stored therein\n\n\t:raises ValueError: if a response from the TO API is successful, but cannot be parsed as\n\t\tJSON\n\t:raises TypeError: if :data:`CACHED_STATUSES` is not iterable\n\t:raises ConnectionError: if something goes wrong contacting the Traffic Ops API\n\t:returns: an iterable generator that yields status names as strings\n\t\"\"\"\n\tglobal CACHED_STATUSES\n\n\tlogging.info(\"Retrieving statuses from Traffic Ops\")\n\n\tif CACHED_STATUSES:\n\t\tlogging.debug(\"Using cached statuses: %r\", CACHED_STATUSES)\n\t\tyield from CACHED_STATUSES\n\telse:\n\t\tstatuses = getTOJSONResponse(\"api/1.3/statuses\")\n\t\tyield from statuses\n\ndef getMyPackages() -> typing.List[packaging.Package]:\n\t\"\"\"\n\tFetches a list of the packages specified by Traffic Ops that should exist on this server.\n\n\t:returns: all of the packages which this system must have, according to Traffic Ops.\n\t:raises ConnectionError: if fetching the package list fails\n\t:raises ValueError: if the API endpoint returns a malformed response that can't be parsed\n\t\"\"\"\n\tfrom .configuration import HOSTNAME\n\n\tlogging.info(\"Fetching this server's package list from Traffic Ops\")\n\n\tmyPackages=getTOJSONResponse('/'.join((\"ort\", HOSTNAME[0], \"packages\")))\n\n\tlogging.debug(\"Raw package response: %r\", myPackages)\n\n\treturn [packaging.Package(p) for p in myPackages]\n\ndef updateTrafficOps():\n\t\"\"\"\n\tUpdates Traffic Ops's knowledge of this server's update status.\n\t\"\"\"\n\tfrom .configuration import MODE, Modes, HOSTNAME\n\tfrom .utils import getYesNoResponse as getYN\n\n\tif MODE is Modes.INTERACTIVE and not getYN(\"Update Traffic Ops?\", default='Y'):\n\t\tlogging.warning(\"Update will not be performed; you should do this manually\")\n\t\treturn\n\n\tlogging.info(\"Updating Traffic Ops\")\n\n\tif MODE is Modes.REPORT:\n\t\treturn\n\n\tpayload = {\"updated\": False, \"reval_updated\": False}\n\tresponse = TOPost(\"/update/%s\" % HOSTNAME[0], payload)\n\n\tif response:\n\t\tlogging.info(\"Traffic Ops response: %s\", response)\n\ndef getMyConfigFiles() -> typing.List[dict]:\n\t\"\"\"\n\tFetches configuration files constructed by Traffic Ops for this server\n\n\t.. note:: This function will set the :data:`traffic_ops_ort.configuration.SERVER_INFO`\n\t\tobject to an instance of :class:`ServerInfo` with the provided information.\n\n\t:returns: A list of constructed config file objects\n\t:raises ConnectionError: when something goes wrong communicating with Traffic Ops\n\t:raises ValueError: when a response was successfully obtained from the Traffic Ops API, but the\n\t\tresponse could not successfully be parsed as JSON, or was missing information\n\t\"\"\"\n\tfrom . 
import configuration\n\n\turi = \"/api/1.3/servers/%s/configfiles/ats\" % configuration.HOSTNAME[0]\n\n\tmyFiles = getTOJSONResponse(uri)\n\n\ttry:\n\t\tconfiguration.SERVER_INFO = ServerInfo(myFiles[\"info\"])\n\t\treturn myFiles[\"configFiles\"]\n\texcept KeyError as e:\n\t\traise ValueError from e\n\ndef getMyChkconfig() -> typing.List[dict]:\n\t\"\"\"\n\tFetches the 'chkconfig' for this server\n\n\t:returns: An iterable list of 'chkconfig' entries\n\t:raises ConnectionError: when something goes wrong communicating with Traffic Ops\n\t:raises ValueError: when a response was successfully obtained from the Traffic Ops API, but the\n\t\tresponse could not successfully be parsed as JSON, or was missing information\n\t\"\"\"\n\tfrom . import configuration\n\n\turi = \"/ort/%s/chkconfig\" % configuration.HOSTNAME[0]\n\tlogging.info(\"Fetching chkconfig from %s\", uri)\n\n\treturn getTOJSONResponse(uri)\n","sub_path":"infrastructure/cdn-in-a-box/ort/traffic_ops_ort/to_api.py","file_name":"to_api.py","file_ext":"py","file_size_in_byte":13233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"411261895","text":"# coding: utf-8\n# process_koreader: add some \"\" for kobo ereader\nimport os\nimport sys\n\ndef list_folders_files(path, suffixes_filters = []):\n list_folders = []\n list_files = []\n for file in os.listdir(path):\n file_path = os.path.join(path, file)\n if os.path.isdir(file_path):\n list_folders.append(file)\n else:\n file_ext = os.path.splitext(file)[-1]\n ignore_this_file = 0\n if (suffixes_filters is not None):\n ignore_this_file = 1\n for suffix in suffixes_filters:\n if (file_ext.upper() == suffix.upper()):\n ignore_this_file = 0\n break\n if (ignore_this_file == 0):\n list_files.append(file)\n return (list_folders, list_files)\n\n\ndef processTitle(input_line_buff, output_buff_array):\n str_span_bold = ''\n index1 = input_line_buff.find(str_span_bold)\n index2 = input_line_buff.find('', index1)\n\n str_title_chapter = input_line_buff[index1 + len(str_span_bold) : index2]\n\n index3 = input_line_buff.find(str_span_bold, index2)\n index4 = input_line_buff.find('', index3)\n\n str_title = input_line_buff[index3 + len(str_span_bold) : index4]\n\n if (index1 >= 0 and index3 >= 0):\n output_buff_array.append('

' + str_title_chapter.strip() + '
' + str_title.strip() + '

' + '\\n\\n')\n elif (index1 >= 0):\n output_buff_array.append('

' + str_title_chapter.strip() + '

' + '\\n\\n')\n else:\n output_buff_array.append(input_line_buff)\n\ndef processParagraph(input_line_buff, output_buff_array):\n '''\n add additional for kobo reader\n '''\n index2 = -1\n index1 = input_line_buff.find('

= 0):\n index2 = input_line_buff.find('>', index1 + 2)\n if ((index1 < 0) or (index2 < 0)):\n output_buff_array.append(input_line_buff)\n return\n\n index6 = input_line_buff.find('> 

')\n if (index6 >= 0):\n return # ignore\n\n index3 = input_line_buff.find('', index1)\n if (index3 > 0):\n processTitle(input_line_buff, output_buff_array)\n return\n\n # modify \"

\" to \"

\"\n index3 = input_line_buff.find('class=\\\"calibre_9\\\"', index1, index2)\n if (index3 > 0):\n output_buff_array.append(input_line_buff[0 : index1])\n # print(str(index1) + ' ' + input_line_buff[0 : index1])\n\n output_buff_array.append('

')\n output_buff_array.append(input_line_buff[(index2 + 1) : ])\n return\n\n '''\n key_classes = ['shi', 'ci', 'qu']\n for class_name in key_classes:\n #

\n class_name_full = 'class=' + '\\\"' + class_name + '\\\"'\n index3 = input_line_buff.find(class_name_full, index1, index2)\n index4 = -1\n if (index3 > 0):\n index4 = input_line_buff.find('

', index2)\n\n if (index4 > 0):\n #

\n output_buff_array.append(input_line_buff[:(index2 + 1)])\n\n # \n output_buff_array.append('')\n # output_buff_array.append('')\n\n # append content:\n output_buff_array.append(input_line_buff[(index2 + 1) : index4])\n\n output_buff_array.append('')\n output_buff_array.append('

' + '\\n')\n return\n '''\n\n # just copy original content:\n output_buff_array.append(input_line_buff)\n\ndef processLine(input_line_buff, output_buff_array):\n if (len(input_line_buff) <= 0):\n return\n\n index1 = input_line_buff.find('= 0):\n index2 = input_line_buff.find('>', index1)\n if (index2 > 0):\n output_buff_array.append('')\n output_buff_array.append(input_line_buff[(index2 + 1):])\n return\n\n index1 = input_line_buff.find('
= 0):\n index2 = input_line_buff.find('
', index1)\n    if (index2 > 0):\n        output_buff_array.append(input_line_buff[(index2 + 6) : ])\n        return\n\n    find_index = input_line_buff.find('') > 0):\n            line_buf += ('\\n')\n            outputLineBuff.append(line_buf)\n\n    if (len(outputLineBuff) < 1):\n        return\n\n    output_folder = './out'\n    if not os.path.exists(output_folder):\n        os.makedirs(output_folder)\n\n    target_file = os.path.join(output_folder, fileName)\n    with open(target_file, 'wt') as file:\n        for line_buff in outputLineBuff:\n            file.write(line_buff)\n\nif __name__ == '__main__':\n    # batRename(sys.argv)\n    suffixes_filters = []\n\n    suffixes_filters.append(\".html\")\n    (list_folders, list_files) = list_folders_files('./text', suffixes_filters)\n    print(\"files: %d\" % len(list_files))\n\n    for item in sorted(list_files):\n        processFile2('./text', item)\n","sub_path":"source/process_kobo_reader.py","file_name":"process_kobo_reader.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"349893994","text":"'''\nGiven a binary tree, find its minimum depth.\nThe minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.\nNote: A leaf is a node with no children.\nExample:\nGiven binary tree [3,9,20,null,null,15,7],\n    3\n   / \\\n  9  20\n    /  \\\n   15   7\nreturn its minimum depth = 2.\n'''\n\n# Definition for a binary tree node.\n\n\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution:\n    # 64.48% - haven't practiced in a while; even this easy problem took several tries to get right\n    def minDepth1(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        def isLeaf(root):\n            if not root:\n                return 0\n            else:\n                if root.right != None and root.left != None:\n                    return 1 + min(isLeaf(root.left), isLeaf(root.right))\n                else:\n                    return 1 + max(isLeaf(root.left), isLeaf(root.right))\n\n        return isLeaf(root)\n\n    # importing the package inside the function slows this one down\n    def minDepth2(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        from collections import deque\n        # check empty\n        if root == None:\n            return 0\n        level = 1\n        tempQ = deque()\n        tempQ.append(root)\n        while True:\n            # Check the first level of nodes; any children get appended to tempQ. Then check the\n            # second level: if some node there has no children, return 2 - reaching level 2 means\n            # level 1 had children, so the depth is 2\n            size = len(tempQ)\n            for _ in range(size):\n                node = tempQ.popleft()\n                isLeaf = True\n                if node.left:\n                    tempQ.append(node.left)\n                    isLeaf = False\n                if node.right:\n                    tempQ.append(node.right)\n                    isLeaf = False\n                if isLeaf:\n                    return level  # returns as soon as some level contains a node with no children\n            level += 1\n\n    # 90.95%\n    def minDepth3(self, root):\n        if root is None:\n            return 0\n        if self.isLeaf(root):\n            return 1\n\n        cnt = 2\n        l = [root]  # Nodes inside l are guaranteed to be non-leaf\n        while True:\n            new = []\n            for tree in l:\n                if tree.left is not None:\n                    if self.isLeaf(tree.left):\n                        return cnt\n                    new.append(tree.left)\n                if tree.right is not None:\n                    if self.isLeaf(tree.right):\n                        return cnt\n                    new.append(tree.right)\n            l = new\n            cnt += 1\n\n    def isLeaf(self, root):\n        return root.left is None and root.right is None\n\n    # 32.14%\n    def minDepth(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        if not root:\n            return 0\n        left = self.minDepth(root.left)\n        right = self.minDepth(root.right)\n        if left == 0 and right == 0:\n            return 1\n        elif left == 0:\n            return right + 1\n        elif right == 0:\n            return left + 1\n        return min(left, right) + 1\n\n\nso = Solution()\n\n
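# Sanity check for the test below: node 1 has children 2 and 3, node 2 only a left\n# child 4, and node 3 only a right child 5, so every leaf sits at depth 3 and each\n# minDepth variant above should print 3.\n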
TreeNode1 = TreeNode(1)\nTreeNode2 = TreeNode(2)\nTreeNode3 = TreeNode(3)\nTreeNode4 = TreeNode(4)\nTreeNode5 = TreeNode(5)\nTreeNode1.left = TreeNode2\nTreeNode1.right = TreeNode3\nTreeNode2.left = TreeNode4\nTreeNode3.right = TreeNode5\nprint(so.minDepth(TreeNode1))","sub_path":"Algorithm101-150/111_Minimum_Depth_of_Binary_Tree.py","file_name":"111_Minimum_Depth_of_Binary_Tree.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"443218899","text":"# Regex examples\n# The pattern = re.compile() method\n# pattern = re.compile(r\"\")\n#\n# (ab)* zero or more occurrences\n# (ab)+ one or more occurrences\n# \\w - any alphanumeric character\n# \\W any non-alphanumeric character\n# \\d any digit\n# \\D anything that is not a digit\n# () groups of characters\n# {m, n} - interval specifying the number of occurrences\n#\n# re.match(pattern, test_string)\n# ^ - marks the beginning of the string\n# $ - marks the end of the string\n\n\n\nimport re\nclass CallCab:\n\n    def __init__(self, phone_number):\n        self.phone = phone_number\n\n    def check_number(self, phone):\n        pattern = re.compile(r\"^(\\d{2})-(\\d{4})-(\\d{4})-(\\d{4})$\")\n        if re.match(pattern, phone):\n            print(\"Found a valid phone number\")\n            return True\n        return False\n\n    def __call__(self):\n        print(self.phone)\n\n\n\ncc = CallCab(\"10-10-10-10\")\nprint(cc.check_number(\"10-1000-2000-6000\"))\ncc()\n","sub_path":"python_msys/ziua1/CallCab.py","file_name":"CallCab.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"475667599","text":"import requests\nimport json\n\n# Get culprit name from spotify API\nHINT_URL = \"https://api.spotify.com/v1/playlists\"\nculprit_playlist_id = \"4wRfyALTv1jNL7HScIhd1S\"\nspotify_token = \"BQCnevGOQcUclx-zbEg4DRZXPLmE4o2y1I135ZlRRgMHsK0y3oElz3d8ySXmS-afU8rUtmgM0rGj6BqVlAY_6jPcXkKIwN2um3ZwVINEXpDGohZWrDnFoG3eYsLvsZ7XpMTbEIiQP5lB5u4IHjZQc2eevxfToGS2zw\" # To be replaced\n\nculprit_information = requests.get(\n    HINT_URL + \"/\" + culprit_playlist_id,\n    headers={\"Authorization\": \"Bearer {}\".format(spotify_token)},\n).content.decode()\n\nculprit_name = json.loads(culprit_information)[\"owner\"][\"display_name\"]\n\n# Send culprit name to the police\nCULPRIT_URL = \"http://ripper.theo.do/api/culprit\"\nuser_token = \"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJ0b2tlbl90eXBlIjoiYWNjZXNzIiwiZXhwIjoxNTcxODQ0Mjg1LCJqdGkiOiI4NjRmY2ZjZjkxZGU0N2JkOTM4MjEwYzI2YWYwNWM4OCIsInVzZXJfaWQiOjIzM30.irR-H4MsqpKg-koOaU1kctRsQQzv7cZ8TYFbt-T-09M\" # To be replaced\n\npolice_answer = requests.post(\n    CULPRIT_URL,\n    data={\"culprit_name\": culprit_name},\n    headers={\"Authorization\": \"Bearer {}\".format(user_token)},\n).content.decode()\n\nprint(police_answer)\n","sub_path":"riddle5/riddle_5.py","file_name":"riddle_5.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"67150938","text":"import sys\nfrom pathlib import Path\n\ndef main(argv):\n    filenya = Path(argv[0]).resolve()\n    lines = open(filenya).read().split(\"\\n\")\n    jumlah = len(lines)\n\n    # a = [line.replace(' ', '') for line in lines[urutan]]\n    # lines = [line.replace(' ', '') for line in lines]\n\n    dump = []\n    for urutan in range(len(lines)):\n        a = lines[urutan].replace('\\t','').split(' ')[0]\n        dump.append({\"no\": urutan, \"data\": a[::-1]})\n\n    # for urutan in range(len(lines)):\n    #     a = lines[urutan].replace('\\t','').split(' ')[0]\n    #     for data in dump:\n    #         aa = data['data']\n    #         if a == \"\":\n    #             print(\"aaa\")\n    #             print(str(urutan) + \" \" + aa)\n    #         if aa == a:\n    #             print(data['no'])\n
    \n    for data in dump:\n        for urutan in range(len(lines)):\n            # normal\n            aa = lines[urutan].replace('\\t','').split(' ')[0]\n            if data['data'] == '':\n                continue\n            if data['data'] == aa:\n                print(data['no'])\n                # print(str(data['no']) + ' ' + data['data'])\n    \n    # for urutan in range(len(lines)):\n    #     a = lines[urutan].replace('\\t','')\n    #     a = a.split(' ')\n    #     if a[0] == 'cetak':\n    #         print(a[1])\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"477229643","text":"from django.conf.urls import include, url\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\n\nschema_view = get_schema_view(\n    openapi.Info(\n        title=\"Geo API\",\n        default_version='v1',\n        description=\"Mozio Backend REST API\",\n        terms_of_service=\"https://www.google.com/policies/terms/\",\n        contact=openapi.Contact(email=\"atykhonov@gmail.com\"),\n    ),\n    public=True,\n    permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n    url(r'api/', include('geo.urls')),\n    url(\n        r'^swagger(?P<format>\\.json|\\.yaml)$',\n        schema_view.without_ui(cache_timeout=0), name='schema-json'\n    ),\n    url(\n        r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0),\n        name='schema-swagger-ui'\n    ),\n    url(\n        r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0),\n        name='schema-redoc'\n    ),\n]\n","sub_path":"moz/moz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"417347107","text":"from operator import itemgetter\n\nfrom pycompass.query import run_query, query_getter\nimport numpy as np\n\nfrom pycompass.utils import get_factory\n\n\ndef new__init__(self, *args, **kwargs):\n    raise ValueError('Compendium object should be created using Connect.get_compendium() or Connect.get_compendia() methods!')\n\n\nclass Compendium(metaclass=get_factory(new__init__)):\n\n    def __init__(self, *args, **kwargs):\n        self.compendium_name = kwargs['compendium_name']\n        self.connection = kwargs['connection']\n        self.compendium_full_name = kwargs['compendium_full_name']\n        self.description = kwargs['description']\n        self.normalization = {}\n        for n in kwargs['normalization']:\n            self.normalization[n] = self.__get_score_rank_methods__(n)\n\n        # note: returning self from __init__ relies on the get_factory metaclass\n        # invoking this method directly rather than through normal instantiation\n        return self\n\n    def get_data_sources(self, filter=None, fields=None):\n        '''\n        Get the experiment data sources, both local and public\n\n        :param filter: return results that match only filter values\n        :param fields: return only specific fields\n        :return: list of dict\n        '''\n        @query_getter('dataSources', ['id', 'sourceName', 'isLocal'])\n        def _get_data_sources(obj, filter=None, fields=None):\n            pass\n        return _get_data_sources(self, filter=filter, fields=fields)\n\n    def get_platform_types(self, filter=None, fields=None):\n        '''\n        Get the platform types\n\n        :param filter: return results that match only filter values\n        :param fields: return only specific fields\n        :return: list of dict\n        '''\n        @query_getter('platformTypes', ['id', 'name', 'description'])\n        def _get_platform_types(obj, filter=None, fields=None):\n            pass\n        return _get_platform_types(self, filter=filter, fields=fields)\n\n    def rank_sample_sets(self, module, rank_method=None, cutoff=None):\n        '''\n        Rank all sample sets on the module's biological features using rank_method\n\n
        :param rank_method: the name of the ranking method to use (see get_score_rank_methods)\n        :param cutoff: if given, results whose value falls below this threshold are dropped\n        :return: the parsed ranking response, as a dict of 'id', 'name' and 'value' lists\n        '''\n        bf = [_bf.id for _bf in module.biological_features]\n        query = '''\n        {{\n            ranking(compendium:\"{compendium}\", normalization:\"{normalization}\", rank:\"{rank}\", \n                    biofeaturesIds:[{biofeatures}]) {{\n                id,\n                name,\n                value\n            }}\n        }}\n        '''.format(compendium=self.compendium_name, normalization=module.normalization, rank=rank_method,\n                   biofeatures='\"' + '\",\"'.join(bf) + '\"')\n        json = run_query(self.connection.url, query)\n        r = json['data']\n        if cutoff:\n            idxs = [i for i, v in enumerate(r['ranking']['value']) if v >= cutoff]\n            r['ranking']['id'] = itemgetter(*idxs)(r['ranking']['id'])\n            r['ranking']['name'] = itemgetter(*idxs)(r['ranking']['name'])\n            r['ranking']['value'] = itemgetter(*idxs)(r['ranking']['value'])\n        return r\n\n    def rank_biological_features(self, module, rank_method=None, cutoff=None):\n        '''\n        Rank all biological features on the module's sample set using rank_method\n\n        :param rank_method: the name of the ranking method to use (see get_score_rank_methods)\n        :param cutoff: if given, results whose value falls below this threshold are dropped\n        :return: the parsed ranking response, as a dict of 'id', 'name' and 'value' lists\n        '''\n        ss = [ss.id for ss in module.sample_sets]\n        query = '''\n        {{\n            ranking(compendium:\"{compendium}\", normalization:\"{normalization}\", rank:\"{rank}\", \n                    samplesetIds:[{sample_set}]) {{\n                id,\n                name,\n                value\n            }}\n        }}\n        '''.format(compendium=self.compendium_name, normalization=module.normalization, rank=rank_method,\n                   sample_set='\"' + '\",\"'.join(ss) + '\"')\n        json = run_query(self.connection.url, query)\n        r = json['data']\n        if cutoff:\n            idxs = [i for i, v in enumerate(r['ranking']['value']) if v >= cutoff]\n            r['ranking']['id'] = itemgetter(*idxs)(r['ranking']['id'])\n            r['ranking']['name'] = itemgetter(*idxs)(r['ranking']['name'])\n            r['ranking']['value'] = itemgetter(*idxs)(r['ranking']['value'])\n        return r\n\n    def get_score_rank_methods(self, normalization):\n        '''\n        Get all the available ranking methods for biological features and sample sets\n\n        :param normalization: the normalization whose supported rank methods should be returned\n        :return: a dict with 'sampleSets' and 'biologicalFeatures' rank method names\n        '''\n        return self.__get_score_rank_methods__(normalization)['scoreRankMethods']\n\n    def __get_score_rank_methods__(self, normalization):\n        query = '''\n        {{\n            scoreRankMethods(compendium:\"{compendium}\", normalization:\"{normalization}\") {{\n                sampleSets,\n                biologicalFeatures\n            }}\n        }}\n        '''.format(compendium=self.compendium_name, normalization=normalization)\n        json = run_query(self.connection.url, query)\n        return json['data']\n","sub_path":"day_3_vespucci/pycompass/compendium.py","file_name":"compendium.py","file_ext":"py","file_size_in_byte":4956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"501776500","text":"import os\nimport csv\nimport datetime as dt\n\nCurrentDate = dt.datetime.today().strftime(\"%m/%d/%Y\")\nscript_path = os.path.abspath('/Users/macbook/Desktop/Data-Program-Files/HW/HW3/Instructions/PyPoll/Resources/election_data.csv')\nscript_dir = os.path.split('/Users/macbook/Desktop/Data-Program-Files/HW/HW3/Instructions/PyPoll/Resources/')[0]\nrel_path = \"election_data.csv\"\nelectiondata = os.path.join(script_dir, rel_path)\n\n# Set up the empty accumulators\nCounty = []\nCandidate = []\nUCandidate = []\nVoteCountC = []\nVotePercentC = []\nCTotal = 0\n\nwith open(electiondata,'r') as csvFile:\n    ReadData = csv.reader(csvFile, delimiter=',')\n    next(ReadData, None)\n    \n    for row in ReadData:\n        # Append data from the row\n        CTotal = CTotal + 1\n        Candidate.append(row[2])\n    for x in set(Candidate):\n        UCandidate.append(x)\n        TotC = Candidate.count(x)\n        VoteCountC.append(TotC)\n        VotePercentC.append(Candidate.count(x)/CTotal)\n\n
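    # A sketch of an equivalent tally (not in the original script): collections.Counter\n    # would give the same winner in one call via Counter(Candidate).most_common(1)[0].\n    # Note that VoteCountC.index(max(...)) below resolves ties to the first candidate found.\n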
    Winner = UCandidate[VoteCountC.index(max(VoteCountC))]\n    \n    print(\"Election Results for the file election_data.csv\")\n    print(str(CurrentDate))\n    print(\"Total Votes: \" + str(CTotal))\n    for i in range(len(set(Candidate))):\n        print(UCandidate[i] + \": \" + str(round(VotePercentC[i]*100,1)) + \"% (\" + str(VoteCountC[i]) + \")\")\n    print(\"Winner: \" + str(Winner))\n\n    save_path = '/Users/macbook/Desktop/Data-Program-Files/HW/HW3/Instructions/PyPoll/Resources/'\n    name_of_file = \"Election_Results\"\n    completeName = os.path.join(save_path, name_of_file + \".txt\")\n\n    with open(completeName, 'w+') as text:\n        text.write(\"Election Results for the file 'election_data\" + \".csv'\" + \"\\n\")\n        text.write(\"Election Results\" + \"\\n\")\n        text.write(str(CurrentDate) + \"\\n\")\n        text.write(\"----------------------------------------------------------\\n\")\n        text.write(\"Total Votes: \" + str(CTotal) + \"\\n\")\n        text.write(\"----------------------------------------------------------\\n\")\n        for i in range(len(set(Candidate))):\n            text.write(UCandidate[i] + \": \" + str(round(VotePercentC[i]*100,1)) + \"% (\" + str(VoteCountC[i]) + \")\\n\")\n        text.write(\"----------------------------------------------------------\\n\")\n        text.write(\"Winner: \" + Winner + \"\\n\")\n        text.write(\"----------------------------------------------------------\\n\")\n    \n","sub_path":"PyPoll/PyPoll Script Done.py","file_name":"PyPoll Script Done.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"139078433","text":"from bs4 import BeautifulSoup\nimport re, os\nimport calendar\n\n\n\nmonths = {v: k for k, v in enumerate(calendar.month_name)}\nmonths.pop('')\n\nall_sheets = [i for i in os.listdir('./Kill_HTML_comments.fld') if re.match('sheet...\\.htm', i)]\nall_sheets.sort()\nsoup = BeautifulSoup(open('Kill_HTML_comments.fld/sheet003.htm'), 'html.parser')\n\ndef findAllComments(soup):\n    return soup.findAll('div', class_=\"msocomtxt\")\n\nevents = []\nfor commentSoup in findAllComments(soup):\n    anchor_selector = \"_anchor_{}\".format(commentSoup.get('id').strip('_com'))\n    row = soup.find('span', {'id': anchor_selector}).parent.parent.parent\n    date = row.findAll('td')[0].text\n\n    author_column = None\n    for td in row.findAll('td'):\n        if len(td.findAll('span')) == 0:\n            continue\n        author_column = td\n\n    author_name = author_column.text.strip()[:-3]\n    reservation_time = commentSoup.find('font', class_=\"font0\").text.split('\\r')[0]\n\n    events.append((date, author_name, reservation_time))\nprint(*events, sep=\"\\n\")\n\n# print(\"\\n\".join(str(events)))\n","sub_path":"killthesheet.py","file_name":"killthesheet.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"468072141","text":"#!/usr/bin/env python\n\n##### imports #####\nimport os\nimport sys\nimport time\nfrom testlib.base.base_utils import get_args\nfrom testlib.scripts.android.fastboot import fastboot_steps\nfrom testlib.scripts.android.fastboot import fastboot_utils\n\n##### initialization #####\nglobals().update(vars(get_args(sys.argv)))\nargs = {}\nfor entry in script_args:\n    key, val = entry.split(\"=\")\n    args[key] = val\nflash_files = args[\"flash_files\"]\npartition_name = args[\"partition_name\"]\npartition_name = partition_name.split(\",\")\nfile_name = args[\"file_name\"]\nfile_name = file_name.split(\",\")\n\n##### test start #####\ntry:\n    os.system(\"mkdir -p ./temp/files/flash\")\n
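    # assumption based on the helper's name: this fetches the auxiliary flashing\n    # scripts into ./temp for the erase/reboot steps that follow\n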
    fastboot_utils.download_flash_scripts()\n\n    fastboot_steps.fastboot_erase_partition(partition_name=partition_name[0], serial=serial)()\n    fastboot_steps.fastboot_erase_partition(partition_name=partition_name[1], serial=serial)()\n\n    check_point1 = False\n    check_point2 = False\n\n    fastboot_utils.start_minicom(serial=serial)\n\n    os.system(\"fastboot reboot > /dev/null 2>&1\")\n    time.sleep(60)\n    fastboot_utils.to_fastboot_by_script(serial=serial)\n\n    fastboot_utils.kill_minicom()\n\n    file_path = \"./temp/files/minicom_result.txt\"\n    return_result = open(file_path).readlines()\n    for line in return_result:\n        if \"read_osloader_img fail\" in line: check_point1 = True\n        if \"ELK: copy ELK from SPI in\" in line: check_point2 = True\n\n    if not check_point1 or not check_point2:\n        raise Exception(\"The test result did not achieve the desired results\")\n\n    fastboot_utils.flash_bxt(zip_file=flash_files, serial=serial)\n    os.system(\"sudo rm -rf ./temp\")\n\nexcept:\n    fastboot_utils.kill_minicom()\n    fastboot_utils.flash_bxt(zip_file=flash_files, serial=serial)\n    os.system(\"sudo rm -rf ./temp\")\n    raise\n##### test end #####","sub_path":"ACS_v.18.20.4_1/ACS/testlib/scripts/android/fastboot/tests_bxt-p/to_fastboot_with_erase_boot_and_bootloader.py","file_name":"to_fastboot_with_erase_boot_and_bootloader.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"630883230","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 13 15:21:51 2017\n\n@author: aude\n\nIncreasing the velocity to reduce the surface tension\n\"\"\"\n\n\n\nfrom fipy import *\nimport random\nimport csv\nimport os, sys\nimport numpy\n\n\nU = 3.\nMobility = 0.55 #ratio of the two viscosities; M_c in Hamouda's paper\nepsilon = 0.35 #code starts going crazy below epsilon=0.1\nl = 0.02 #this is lambda from Hamouda's paper\nduration = 0. #stabilisation phase\nsweeps = 41 #velocity stabilisation\nstartpoint = 0.1\n\n#-----------------------------------------------------------------------\n#------------------------Geometry and mesh------------------------------\n#-----------------------------------------------------------------------\n\n#Space\nL = 1. #length\nW = 1. #width: characteristic length\nb = 1. 
#gap\n\n#Mesh\ndx = 0.15 #width of a control volume\nnx = 150 #number of control volumes\nmesh = Grid1D(dx=dx, nx=nx)\n\n#-----------------------------------------------------------------------\n#---------------------Description of the fluids-------------------------\n#-----------------------------------------------------------------------\n\n#Parameters of the fluids\nviscosity2 = 1.\nM = Mobility * epsilon**2 #M in Hamouda's paper\nviscosity1 = viscosity2 * Mobility\npermeability1 = permeability2 = 1.\nbeta1 = viscosity1 / permeability1\nbeta2 = viscosity2 / permeability2\n\n#Variables of the fluids\npressure = CellVariable(mesh=mesh, name='pressure')\npressureCorrection = CellVariable(mesh=mesh)\nxVelocity = CellVariable(mesh=mesh, name='X Velocity')\n\nvelocity = FaceVariable(mesh=mesh, rank=1)\n\n#-----------------------------------------------------------------------\n#------------------------Phase-field model------------------------------\n#-----------------------------------------------------------------------\n\n#Order Parameter\nphi = CellVariable(name=r'$\\phi$', mesh=mesh, hasOld=1.)\n\n#Cahn-Hilliard equation\nPHI = phi.arithmeticFaceValue #result more accurate by non-linear interpolation\ncoeff1 = Mobility * l * (6.* PHI*(PHI-1.) + 1)\n## blows up when mobility is between 2.2 and 2.3 and 0.7 and 0.8, while l and epsilon=1\n\neq = (TransientTerm() + ConvectionTerm(velocity) == DiffusionTerm(coeff=coeff1) - DiffusionTerm(coeff=(M, l)))\n\n\n\n\n#-----------------------------------------------------------------------\n#-------------------------Boundary Conditions---------------------------\n#-----------------------------------------------------------------------\nphi.faceGrad.constrain([0], mesh.facesRight)\n#Phase\nx = mesh.cellCenters[0]\n\ndef initialize(phi):\n    phi.setValue(0.5*(1+numerix.tanh((x-nx*dx/2)/(2*epsilon))))\n\n\n\ninitialize(phi)\n\nbeta = CellVariable(mesh=mesh, name=r'$\\beta$', value=beta2 * phi + beta1 * (1.-phi))\n#-----------------------------------------------------------------------\n#-------------------------Velocity and pressure-------------------------\n#-----------------------------------------------------------------------\n\n\nxVelocityEq = (ImplicitSourceTerm(coeff=beta) + pressure.grad[0])\n\ncoeff = 1./ beta.arithmeticFaceValue\npressureCorrectionEq = DiffusionTerm(coeff=coeff) - velocity.divergence\n\n#Remove oscillations\nfrom fipy.variables.faceGradVariable import _FaceGradVariable\n\n\n#-----------------------------------------------------------------------\n#-------------------------------Viewers---------------------------------\n#-----------------------------------------------------------------------\n\n#Viewer\nviewer = Viewer(vars=(phi), datamin=-1., datamax=2.)\nviewer2 = Viewer(vars=(xVelocity), datamin=1., datamax=4.)\n\nviewer4 = Viewer(vars=(pressure), datamin=0., datamax=80.)\n\n\n\n#-----------------------------------------------------------------------\n#---------------------------Initialization------------------------------\n#-----------------------------------------------------------------------\n\n#Phase\n\ndexp = 1.\nelapsed = 0.\n\nwhile elapsed < duration:\n    phi.updateOld()\n    dt = min(100, numerix.exp(dexp))\n    elapsed += dt\n    dexp += 0.01\n    eq.solve(var=phi, dt=dt, solver=LinearGMRESSolver())\n    if __name__ == '__main__':\n        viewer.plot()\n\n    \n\n\n\n#Pressure and velocity\n\nxVelocity.constrain(U, mesh.facesLeft)\n\npressureCorrection.constrain(0., mesh.facesRight)\n\npressureRelaxation = 0.8\nvelocityRelaxation = 
0.5\n\n\nfor sweep in range(sweeps):\n ##Solve the Stokes equations to get starred value\n# xVelocityEq.cacheMatrix()\n xres = xVelocityEq.sweep(var=xVelocity, underRelaxation=velocityRelaxation)\n# xmat = xVelocityEq.matrix\n ##update the ap coefficient from the matrix diagonal\n# ap[:] = xmat.takeDiagonal()\n ##update the face velocities based on starred values with the Rhi-Chow correction\n #cell pressure gradient\n presgrad = pressure.grad\n #face pressure gradient\n facepresgrad = _FaceGradVariable(pressure)\n #\n velocity[0] = xVelocity.arithmeticFaceValue + 1. / beta.arithmeticFaceValue * (presgrad[0].arithmeticFaceValue-facepresgrad[0])\n velocity[0, mesh.facesLeft.value] = U\n velocity[0, mesh.facesRight.value] = U\n ##solve the pressure correction equation\n pressureCorrectionEq.cacheRHSvector()\n pres = pressureCorrectionEq.sweep(var=pressureCorrection)\n rhs = pressureCorrectionEq.RHSvector\n ## update the pressure using the corrected value\n pressure.setValue(pressure + pressureRelaxation * pressureCorrection)\n ## update the velocity using the corrected pressure\n xVelocity.setValue(xVelocity - pressureCorrection.grad[0] / beta)\n# xVelocity[0]=U\n# xVelocity[nx-1]=U\n viewer2.plot()\n viewer4.plot()\n\n\ndisplacement = 90.\ntimeStep = 0.8 * dx / U #less than one space step per time step\nelapsed = 0.\n\nwhile elapsed < displacement/U:\n phi.updateOld()\n res = 1e+10\n while res > 1e-6:\n res = eq.sweep(var=phi, dt=timeStep, solver=LinearGMRESSolver())\n beta.setValue(beta2 * phi + beta1 * (1.-phi))\n# raw_input(\"pause\")\n for sweep in range(sweeps):\n ##Solve the Stokes equations to get starred value\n# xVelocityEq.cacheMatrix()\n xres = xVelocityEq.sweep(var=xVelocity, underRelaxation=velocityRelaxation)\n# xmat = xVelocityEq.matrix\n ##update the ap coefficient from the matrix diagonal\n# ap[:] = xmat.takeDiagonal()\n ##update the face velocities based on starred values with the Rhi-Chow correction\n #cell pressure gradient\n presgrad = pressure.grad\n #face pressure gradient\n facepresgrad = _FaceGradVariable(pressure)\n #\n velocity[0] = xVelocity.arithmeticFaceValue + 1. 
/ beta.arithmeticFaceValue * (presgrad[0].arithmeticFaceValue-facepresgrad[0])\n        velocity[0, mesh.facesLeft.value] = U\n        velocity[0, mesh.facesRight.value] = U\n        ##solve the pressure correction equation\n        pressureCorrectionEq.cacheRHSvector()\n        pres = pressureCorrectionEq.sweep(var=pressureCorrection)\n        rhs = pressureCorrectionEq.RHSvector\n        ## update the pressure using the corrected value\n        pressure.setValue(pressure + pressureRelaxation * pressureCorrection)\n        ## update the velocity using the corrected pressure\n        xVelocity.setValue(xVelocity - pressureCorrection.grad[0] / beta)\n#        xVelocity[0]=U\n#        xVelocity[nx-1]=U\n    elapsed += timeStep\n    viewer.plot(filename='phi%d_' % elapsed + '.png')\n    viewer2.plot(filename='XVelocity%d_' % elapsed + '.png')\n    viewer4.plot(filename='pressure%d_' % elapsed + '.png')\n    TSVViewer(vars=(phi, xVelocity, pressure, beta)).plot(filename='essaidonne%d_' % elapsed + '.tsv')\n    print(elapsed)\n\n\nraw_input(\"pause\")\n","sub_path":"Work_in_progress/sim1Dverif.py","file_name":"sim1Dverif.py","file_ext":"py","file_size_in_byte":7566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"68154171","text":"import logging\nimport os\nimport uuid\n\nfrom django.test import TestCase\nfrom libumccr import libslack, aws\nfrom libumccr.aws import libsqs\nfrom mockito import unstub, mock, when\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass S3EventUnitTestCase(TestCase):\n\n    def setUp(self) -> None:\n        # Comment out the following mocks to actually send a slack message for this test case, e.g.\n        # export SLACK_CHANNEL=#arteria-dev\n        #\n        os.environ['SLACK_CHANNEL'] = \"#mock\"\n        os.environ['SLACK_WEBHOOK_ID'] = \"mock_webhook_id_123\"\n        mock_response = mock(libslack.http.client.HTTPResponse)\n        mock_response.status = 200\n        when(libslack.http.client.HTTPSConnection).request(...).thenReturn('ok')\n        when(libslack.http.client.HTTPSConnection).getresponse(...).thenReturn(mock_response)\n\n        mock_sqs = aws.client(\n            'sqs',\n            endpoint_url='http://localhost:4566',\n            region_name='ap-southeast-2',\n            aws_access_key_id=str(uuid.uuid4()),\n            aws_secret_access_key=str(uuid.uuid4()),\n            aws_session_token=f\"{uuid.uuid4()}_{uuid.uuid4()}\"\n        )\n        when(aws).sqs_client(...).thenReturn(mock_sqs)\n        when(libsqs).sqs_client(...).thenReturn(mock_sqs)\n\n    def tearDown(self) -> None:\n        del os.environ['SLACK_CHANNEL']\n        del os.environ['SLACK_WEBHOOK_ID']\n        unstub()\n\n    def verify_local(self):\n        queue_urls = libsqs.sqs_client().list_queues()['QueueUrls']\n        logger.info(f\"SQS_QUEUE_URLS={queue_urls}\")\n        self.assertIn('4566', queue_urls[0])\n        logger.info(f\"-\" * 32)\n\n\nclass S3EventIntegrationTestCase(TestCase):\n    # integration tests hit the actual file or API endpoint, so they are mostly run manually;\n    # they require an appropriate access mechanism, such as an active aws login session.\n    # To run one, comment out its @skip decorator, run the test case,\n    # and put the @skip decorator back afterwards.\n\n    pass\n","sub_path":"data_processors/s3/tests/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"302833699","text":"import re\nfrom copy import deepcopy\nfrom unittest import mock\n\nfrom django.db.models.query import QuerySet\nfrom django.utils.functional import cached_property\nfrom django.conf import settings\n\nfrom wagtail.search.backends.elasticsearch6 import (\n    Elasticsearch6SearchBackend,\n    Elasticsearch6SearchQueryCompiler,\n
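    # the wagtail ES6 base classes that the subclasses in this module extend\n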
    Elasticsearch6SearchResults,\n    Elasticsearch6Mapping,\n)\nfrom wagtail.search.query import PlainText\nfrom wagtail.search.backends.base import FilterFieldError\n\nfrom .utils import get_facet_table, to_float\nfrom .errors import QueryTooLarge\n\nORDERING_RE = re.compile(r\"(?P<sign>[\\-\\+]?)(?P<order_by>(.*))\")\nSPIT_THAT_BITCH_RE = re.compile(r\"(?P<field_name>[^\\.]+)(?:\\.(?P<addition>.*))?\")\nPAGE_SIZE = getattr(settings, \"OSCAR_ELASTICSEARCH_QUERY_PAGE_SIZE\", 100)\nFACET_BUCKET_SIZE = getattr(settings, \"OSCAR_ELASTICSEARCH_FACET_BUCKET_SIZE\", 10)\n\ndef range_pairs(range_definition):\n    last_result = None\n    for _range in range_definition:\n        if last_result is not None:\n            yield {\"from\": last_result, \"to\": _range}\n        else:\n            yield {\"to\": _range}\n\n        last_result = _range\n\n    yield {\"from\": last_result}\n\n\nclass SearchMapping(Elasticsearch6Mapping):\n    default_fields = (\n        Elasticsearch6Mapping.all_field_name,\n        Elasticsearch6Mapping.edgengrams_field_name,\n    )\n\n    def get_field_column_name(self, field):\n        if field in self.default_fields:\n            return field\n        return super(SearchMapping, self).get_field_column_name(field)\n\n    def get_autocomplete_column_name(self, field):\n        return self.get_field_column_name(field).replace(\n            \"_edgengrams\", \"_auto_complete\"\n        )\n\n    def get_document(self, obj):\n        doc = super().get_document(obj)\n        for field in self.model.get_autocomplete_search_fields():\n            field_name = self.get_autocomplete_column_name(field)\n            doc[field_name] = field.get_value(obj)\n\n        return doc\n\n    def get_mapping(self):\n        mapping = super().get_mapping()\n        properties = mapping[\"doc\"][\"properties\"]\n        for field in self.model.get_autocomplete_search_fields():\n            field_name = self.get_autocomplete_column_name(field)\n            field_mapping = {\"type\": \"completion\"}\n            field_mapping.update(field.kwargs.get(\"es_extra\", {}))\n            properties[field_name] = field_mapping\n\n        return mapping\n\n\nclass SearchResults(Elasticsearch6SearchResults):\n    def _facet_to_aggregation(self, facet_name):\n        \"Format an aggregation definition for ES\"\n        facet = self.query_compiler.get_facet(facet_name)\n        if facet is None:\n            return {}\n\n        facet_type = facet[\"type\"]\n        full_column_name = self.query_compiler.get_field_name_for_path(facet_name)\n\n        if facet_type == self.query_compiler.FACET_TYPE_TERM:\n            return {\n                \"terms\": {\n                    \"field\": full_column_name,\n                    \"size\": FACET_BUCKET_SIZE,\n                    \"order\": {\"_key\": \"asc\"},\n                }\n            }\n\n        elif facet_type == self.query_compiler.FACET_TYPE_RANGE:\n            ranges = range_pairs(facet[\"ranges\"])\n            return {\"range\": {\"field\": full_column_name, \"ranges\": list(ranges)}}\n\n    def suggestions(self, field):\n        \"See if ES can propose suggestions based on the field name passed\"\n        if isinstance(self.query_compiler.query, PlainText):\n            field_name = self.query_compiler.get_suggestion_field_name(field)\n            body = self._get_es_body(for_count=True)\n            body[\"suggest\"] = {\n                \"text\": self.query_compiler.query.query_string,\n                field: {\"term\": {\"field\": field_name}},\n            }\n\n            response = self.backend.es.search(\n                index=self.backend.get_index_for_model(\n                    self.query_compiler.queryset.model\n                ).name,\n                body=body,\n                size=0,\n            )\n            return response[\"suggest\"]\n\n        return {}\n\n    def facets(self, *field_names):\n        \"Fetches facets from ES based on the field names passed\"\n        aggregations = {}\n        for field_path in field_names:\n            field_match = SPIT_THAT_BITCH_RE.match(field_path)\n            if field_match is not None:\n                field_name = field_match.group(\"field_name\")\n\n                # Get field\n                field = self.query_compiler._get_filterable_field(  # pylint: disable=protected-access\n
                    field_name\n                )\n                if field is None:\n                    raise FilterFieldError(\n                        'Cannot facet search results with field \"%(field_name)s\". '\n                        'Please add index.FilterField(\"%(field_name)s\") to %(model_name)s.search_fields.'\n                        % {\n                            \"field_name\": field_name,\n                            \"model_name\": self.query_compiler.queryset.model.__name__,\n                        },\n                        field_name=field_name,\n                    )\n\n                aggregations[field_path] = self._facet_to_aggregation(field_path)\n\n        if aggregations:\n            unfiltered_index = {\n                \"index\": self.backend.get_index_for_model(\n                    self.query_compiler.queryset.model\n                ).name\n            }\n            unfiltered_body = {\n                \"query\": self.query_compiler.get_unfiltered_query(),\n                \"size\": 0,\n            }\n            unfiltered_body[\"aggregations\"] = aggregations\n\n            filtered_index = unfiltered_index\n            filtered_body = {\"query\": self.query_compiler.get_query(), \"size\": 0}\n            filtered_body[\"aggregations\"] = aggregations\n\n            multi_request = [\n                unfiltered_index,\n                unfiltered_body,\n                filtered_index,\n                filtered_body,\n            ]\n\n            # Send to Elasticsearch\n            response = self.backend.es.msearch(body=multi_request)\n            unfiltered_response, filtered_response = response[\"responses\"]\n            return (\n                unfiltered_response[\"aggregations\"],\n                filtered_response[\"aggregations\"],\n            )\n\n        return {}, {}\n\n    def es_filter(self, **filters):\n        new = self._clone()\n        new.query_compiler = self.query_compiler.clone(es_filters=filters)\n        return new\n\n    def es_order_by(self, ordering):\n        new = self._clone()\n        new.query_compiler = self.query_compiler.clone(es_ordering=ordering)\n        return new\n\n    def _get_results_from_hits(self, hits):\n        \"Much more efficient implementation than the one currently in wagtail ES6\"\n        # Get pks from results\n        pks = [hit[\"_id\"] for hit in hits]\n\n        # query for all pks into a dict\n        results = self.query_compiler.queryset.in_bulk(pks)\n\n        # Yield results in order given by Elasticsearch\n        for index, pk_str in enumerate(pks):\n            pk = self.query_compiler.queryset.model._meta.pk.to_python(pk_str)\n\n            result = results.get(pk)\n\n            if result is not None:\n                if self._score_field:\n                    score = hits[index][\"_score\"]\n                    setattr(result, self._score_field, score)\n\n                yield result\n\n    def autocomplete(self, fields):\n        body = {\n            \"suggest\": {\n                field: {\n                    \"prefix\": self.query_compiler.query.query_string,\n                    \"completion\": {\n                        \"field\": self.query_compiler.get_autocomplete_field_name(field),\n                        \"skip_duplicates\": True,\n                    },\n                }\n                for field in fields\n            },\n            \"_source\": False,\n        }\n        result = self._do_raw_search(body)\n        return result[\"suggest\"]\n\n    def _do_raw_search(self, body):\n        if self.stop is not None:\n            limit = self.stop - self.start\n        else:\n            limit = None\n\n        if limit is None:\n            raise QueryTooLarge(\n                \"Query without limit will download entire index, aborting\"\n            )\n        elif limit > PAGE_SIZE:\n            raise QueryTooLarge(\n                \"query_page_size is %i, but %i items were requested\"\n                % (PAGE_SIZE, limit)\n            )\n\n        params = {\n            \"index\": self.backend.get_index_for_model(\n                self.query_compiler.queryset.model\n            ).name,\n            \"body\": body,\n            \"_source\": False,\n            \"from_\": self.start,\n            \"size\": limit or PAGE_SIZE,\n        }\n\n        # Send to Elasticsearch\n        return self.backend.es.search(**params)\n\n    def _do_search(self):\n        \"Search without ever allowing scroll\"\n        body = self._get_es_body()\n        results = self._do_raw_search(body)\n        hits = results[\"hits\"][\"hits\"]\n\n        # Get results\n        return self._get_results_from_hits(hits)\n\n    def __len__(self):\n        return self.count()\n\n\nclass SearchQueryCompiler(Elasticsearch6SearchQueryCompiler):\n    FACET_TYPE_TERM = \"term\"\n
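    # a facet definition (from get_facet_table) is either a set of term buckets or a list of numeric ranges\n    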
FACET_TYPE_RANGE = \"range\"\n mapping_class = SearchMapping\n\n def __init__(\n self,\n queryset,\n query,\n es_filters=None,\n es_ordering=None,\n fields=None,\n operator=None,\n order_by_relevance=True,\n partial_match=True,\n ):\n self.es_filters = es_filters or {}\n self.es_ordering = es_ordering or []\n self.facet_table = get_facet_table()\n\n super().__init__(\n queryset,\n query,\n fields=fields,\n operator=operator,\n order_by_relevance=not bool(es_ordering) and order_by_relevance,\n partial_match=partial_match,\n )\n\n def check(self):\n if self.fields:\n with mock.patch.object(\n self, \"fields\", set(self.fields) - set(self.mapping.default_fields)\n ): # remove default_fields before checking, they are not allowed!\n super(SearchQueryCompiler, self).check()\n else: # perform the default checks if no fields are defined.\n super(SearchQueryCompiler, self).check()\n\n def clone(self, es_filters=None, es_ordering=None):\n es_filters_new = deepcopy(self.es_filters)\n if es_filters is not None:\n es_filters_new.update(es_filters)\n\n ordering = es_ordering if es_ordering is not None else self.es_ordering\n\n return self.__class__(\n self.queryset,\n self.query,\n es_filters=es_filters_new,\n es_ordering=ordering,\n fields=self.fields,\n order_by_relevance=not bool(ordering) and self.order_by_relevance,\n partial_match=self.partial_match,\n )\n\n def get_facet(self, facet_name):\n return self.facet_table.get(facet_name)\n\n @cached_property\n def _autocomplete_field_lookup(self):\n return dict(\n (field.get_attname(self.queryset.model), field)\n for field in self.queryset.model.get_autocomplete_search_fields()\n )\n\n def _get_autocomplete_field(self, field_attname):\n return self._autocomplete_field_lookup.get(field_attname)\n\n def get_autocomplete_field_name(self, field_attname):\n autocomplete_field = self._get_autocomplete_field(field_attname)\n return self.mapping.get_autocomplete_column_name(autocomplete_field)\n\n def get_suggestion_field_name(self, field_attname):\n suggestion_field = self._get_autocomplete_field(field_attname)\n return self.mapping.get_field_column_name(suggestion_field)\n\n @cached_property\n def _filterable_field_lookup(self):\n return dict(\n (field.get_attname(self.queryset.model), field)\n for field in self.queryset.model.get_filterable_search_fields()\n )\n\n def _get_filterable_field(self, field_attname):\n return self._filterable_field_lookup.get(field_attname)\n\n def get_field_name_for_path(self, path):\n field_match = SPIT_THAT_BITCH_RE.match(path)\n if field_match is not None:\n field_name = field_match.group(\"field_name\")\n addition = field_match.group(\"addition\")\n field = self._get_filterable_field(field_name)\n column_name = self.mapping.get_field_column_name(field)\n\n if addition:\n return \"%s.%s\" % (column_name, addition)\n\n return column_name\n\n def get_es_filters(self):\n es_filters = []\n for facet_name, values in self.es_filters.items():\n facet = self.get_facet(facet_name)\n if facet is None:\n continue\n\n facet_type = facet[\"type\"]\n\n if facet_type == self.FACET_TYPE_RANGE:\n\n range_query = []\n\n for value in values:\n start_pattern, end_pattern = value.split(\"-\", maxsplit=1)\n start = to_float(start_pattern)\n end = to_float(end_pattern)\n\n range_restriction = dict()\n\n if start:\n range_restriction[\"gte\"] = start\n if end:\n range_restriction[\"lt\"] = end\n\n range_query.append(\n {\n \"range\": {\n self.get_field_name_for_path(\n facet_name\n ): range_restriction\n }\n }\n )\n es_filters.append({\"bool\": 
{\"should\": range_query}})\n\n elif facet_type == self.FACET_TYPE_TERM:\n if len(values) > 1:\n es_filters.append(\n {\"terms\": {self.get_field_name_for_path(facet_name): values}}\n )\n else:\n value = values[0]\n es_filters.append(\n {\"match\": {self.get_field_name_for_path(facet_name): value}}\n )\n\n return es_filters\n\n def get_es_ordering(self):\n result = []\n for ordering in self.es_ordering:\n\n if isinstance(ordering, str):\n order = ORDERING_RE.match(ordering)\n if order is not None:\n order_by = order.group(\"order_by\")\n order_by_filter = self.mapping.get_field_column_name(\n self._get_filterable_field(order_by)\n )\n if order.group(\"sign\") == \"-\":\n result.append({order_by_filter: \"desc\"})\n else:\n result.append(order_by_filter)\n elif ordering:\n result.append(ordering)\n\n return result\n\n def get_unfiltered_query(self):\n inner_query = self.get_inner_query()\n filters = super().get_filters()\n\n if len(filters) == 1:\n return {\"bool\": {\"must\": inner_query, \"filter\": filters[0]}}\n elif len(filters) > 1:\n return {\"bool\": {\"must\": inner_query, \"filter\": filters}}\n else:\n return inner_query\n\n def get_filters(self):\n return self.get_es_filters() + super().get_filters()\n\n def get_sort(self):\n default_order = super().get_sort()\n if default_order:\n return self.get_es_ordering() + default_order\n\n return self.get_es_ordering()\n\n\nclass SearchBackend(Elasticsearch6SearchBackend):\n query_compiler_class = SearchQueryCompiler\n results_class = SearchResults\n mapping_class = SearchMapping\n\n def search_suggestions(self, query, model_or_queryset, fields):\n \"Suggest some search queries, based on the query passed\"\n if isinstance(model_or_queryset, QuerySet):\n queryset = model_or_queryset\n else:\n queryset = model_or_queryset.objects.all()\n\n # Search\n query_compiler_class = self.query_compiler_class\n search_query = query_compiler_class(queryset, query)\n\n # Check the query\n search_query.check()\n\n num_suggestions = settings.OSCAR_PRODUCTS_PER_PAGE\n result = self.results_class(self, search_query)[0:num_suggestions]\n return result.autocomplete(fields)\n","sub_path":"oscar_elasticsearch/search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":16389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"285012307","text":"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nfrom numpy import array,genfromtxt,average,gradient,histogram\nfrom os.path import exists\nfrom sys import argv,exit\n\nFile = \"1\"\nname = File+\".dat\"\ntmin = 10000000\ntmax = 14000000\n\ndata = genfromtxt(name).transpose()\n\nt = data[0]\ndT = data[1]\niT = data[2]\n\n# Function returns a corrected temperature value from thermometer voltage.\ndef corT(O):\n return ((220.6 * O) + 271.1)\n\n# Return sensitivity given by the quadratic calibration fit.\ndef sens(arg):\n return ( -(2.03e-7)*arg*arg + (2.07e-4)*arg - 6.86e-3 )\n\n\n# Correct by calibration values\nT = [ corT(s) for s in iT ]\nS = [ sens(s) for s in T ]\n\n# Numerically compute dT/dt, bin by 1024\n# Due to this binning, our array is 512 less at each end, so some magical 512s appear.\n# It's also probably obvious that I've only discovered list comprehensions in Python.\nbT = [ average([T[i] for i in range(j-512,j+512)]) for j in range(512,len(t)-512)]\ndTdt = gradient(bT)\n\n'''\nfor i in range(0,len(t)):\n if (t[i] < tmin):\n t[i] = None\n dT[i] = None\n T[i] = None\n if (t[i] > tmax):\n t[i] = None\n dT[i] = None\n T[i] = 
'''\n# This allows determination of where to set tmin&tmax.\nplt.plot(t,dT,'.')\nplt.show()\n\n# Plot the thermogram.\ndQdT = [ ( -(dT[i])/(dTdt[i-512]*S[i]) ) for i in range(512,len(T)-512) ]\n\nplt.plot(T[512:-512],dQdT,'.')\nplt.xlabel(\"T [K]\")\nplt.ylabel(\"dQ/dT [J/K]\")\nplt.axis([250,290,-150,50])\nplt.savefig(\"thermogram-\"+File+\".pdf\",format=\"PDF\")\nplt.show()\nplt.clf()\n\n# Now follows a lot of crazy binning to make a readable plot.\nBinFactor = 64\nNQ = [average([dQdT[i] for i in range(j-BinFactor,j+BinFactor)]) for j in range(BinFactor,len(dQdT)-BinFactor)]\nNT = [average([T[i] for i in range(j-BinFactor,j+BinFactor)]) for j in range(BinFactor+512,len(T)-BinFactor-512)]\nplt.plot(NT,NQ,'.')\nplt.xlabel(\"T [K]\")\nplt.ylabel(\"dQ/dT [J/K]\")\nplt.axis([250,290,-150,50])\nplt.show()\n","sub_path":"calorimeter/dat/good/anal1.py","file_name":"anal1.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"626596066","text":"genres=['rock','R&B','Jazz','Metal','soul']\r\nb=[]\r\ni=0\r\na=genres[i]\r\nwhile(a!=\"soul\"):\r\n    print(a)\r\n    b.append(genres[i])\r\n    i=i+1\r\n    a=genres[i]\r\n\r\n\r\nprint(\"we are now outside loop\")\r\nprint(\"The value of b is\", b)","sub_path":"prac2.py","file_name":"prac2.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"548581764","text":"# Copyright 2014 - Savoir-Faire Linux inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport copy\nimport json\n\n\nfrom surveil.api.datamodel.config import realm\nfrom surveil.tests.api import functionalTest\n\n\nclass TestRealmsController(functionalTest.FunctionalTest):\n\n def setUp(self):\n super(TestRealmsController, self).setUp()\n self.realms = [\n {\n 'realm_name': 'World',\n 'realm_members': 'Europe,America,Asia',\n 'default': 0\n },\n {\n 'realm_name': 'Anti-world',\n 'realm_members': 'void,black-hole',\n 'default': 1\n },\n ]\n self.mongoconnection.shinken.realms.insert(\n copy.deepcopy(self.realms)\n )\n\n def test_get_all_realms(self):\n response = self.get('/v2/config/realms')\n\n self.assert_count_equal_backport(\n json.loads(response.body.decode()),\n self.realms\n )\n self.assertEqual(response.status_int, 200)\n\n def test_get_one_realm(self):\n response = self.get('/v2/config/realms/World')\n\n self.assertEqual(\n json.loads(response.body.decode()),\n self.realms[0]\n )\n\n def test_create_realm(self):\n r = realm.Realm(\n realm_name='John',\n realm_members=\"marie,bob,joe\",\n default=1\n )\n\n self.post_json('/v2/config/realms', r.as_dict())\n\n self.assertIsNotNone(\n self.mongoconnection.shinken.realms.find_one(r.as_dict())\n )\n\n def test_delete_realm(self):\n self.assertIsNotNone(\n self.mongoconnection.shinken.realms.find_one(self.realms[0])\n )\n\n self.delete('/v2/config/realms/World')\n\n self.assertIsNone(\n self.mongoconnection.shinken.realms.find_one(self.realms[0])\n )\n\n def test_put_realm(self):\n self.assertEqual(\n self.mongoconnection.shinken.realms.find_one(\n {'realm_name': 'World'}\n )['realm_members'],\n 'Europe,America,Asia'\n )\n\n self.put_json(\n '/v2/config/realms/World',\n {\"realm_name\": \"World\",\n \"realm_members\": \"updated\",\n \"default\": 0}\n )\n\n self.assertEqual(\n self.mongoconnection.shinken.realms.find_one(\n {'realm_name': 'World'}\n )['realm_members'],\n 'updated'\n )\n","sub_path":"surveil/tests/api/controllers/v2/config/test_realms.py","file_name":"test_realms.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"511663122","text":"from google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import db\nimport os\nimport logging\n\nimport model\n\n\nwebapp.template.register_template_library('lib.templatefilters')\n\nclass BaseHandler(webapp.RequestHandler):\n template_names = {}\n session = None\n \n def initialize(self, request, response):\n super(BaseHandler, self).initialize(request, response)\n self.session = self.request.environ['beaker.session']\n if 'user' in self.session:\n user_key = db.Key(self.session['user'])\n user_key = db.Key.from_path('User', user_key.id_or_name())\n self.user = model.User.get(user_key)\n logging.info(\"User: %s\", self.user.username)\n if self.user.isdisabled:\n logging.info(\"Logging disabled user '%s' out.\" % self.user.username)\n del self.session['user']\n self.session['logged_in'] = False\n self.session.save()\n else:\n self.user = None\n \n def GetTemplatePath(self, template):\n return os.path.join(os.path.dirname(__file__), \"..\", \"templates\", template)\n\n def RenderTemplate(self, template_name, template_values):\n self.response.out.write(\n template.render(self.GetTemplatePath(template_name),\n template_values))\n\n def GetTemplateValues(self, method):\n return {\n \"session\": self.session,\n \"user\": 
self.user,\n\t \"path_qs\": self.request.path_qs,\n\t \"logged_in\": self.session.get(\"logged_in\", False)\n }\n\n def get(self):\n self.RenderTemplate(self.template_names[\"get\"],\n self.GetTemplateValues(\"get\"))\n\n\ndef RequiresLogin(fun):\n def RequiresLoginDecorator(self, *args, **kwargs):\n if not self.session.get(\"logged_in\", False):\n self.redirect(\"/login\")\n return\n return fun(self, *args, **kwargs)\n return RequiresLoginDecorator\n\ndef RequiresModerator(fun):\n def RequiresModeratorDecorator(self, *args, **kwargs):\n if not self.session.get(\"logged_in\", False):\n self.redirect(\"/login\")\n return\n elif not self.user.ismoderator:\n self.response.set_status(403)\n self.RenderTemplate(\"permissiondenied.html\", self.GetTemplateValues(None))\n return\n return fun(self, *args, **kwargs)\n return RequiresModeratorDecorator\n\ndef RequiresAdmin(fun):\n def RequiresAdminDecorator(self, *args, **kwargs):\n if not self.session.get(\"logged_in\", False):\n self.redirect(\"/login\")\n return\n elif not self.user.isadmin:\n self.response.set_status(403)\n self.RenderTemplate(\"permissiondenied.html\", self.GetTemplateValues(None))\n return\n return fun(self, *args, **kwargs)\n return RequiresAdminDecorator\n\ndef nify_str(s):\n return s.replace(\"&\", \"\").replace(\"$\", \"\").replace(\"#\", \"\")\n","sub_path":"lib/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"398062167","text":"import tensorflow as tf\nimport os\n\nfrom models import generator, discriminator, flownet, initialize_flownet\nfrom loss_functions import intensity_loss, gradient_loss\nfrom v_utils import DataLoader, load, save, psnr_error\n\nimport cv2\nimport numpy as np\nfrom glob import glob\n\ndataset_name = 'vessel'\ntrain_folder = '/dmount/Vessel/drawing_anno/Patient/train/image'\ntrain_gtfolder = '/dmount/Vessel/drawing_anno/Patient/train/label'\ntest_folder = '/dmount/Vessel/drawing_anno/Patient/vessel/30_1_in'\ntest_gtfolder = '/dmount/Vessel/drawing_anno/Patient/vessel/30_1_out'\n\ntrain_dirs = glob(os.path.join(train_folder, '*.jpg'))\ntrain_gtdirs = glob(os.path.join(train_gtfolder, '*.jpg'))\ntest_dirs = glob(os.path.join(test_folder, '*.jpg'))\ntest_gtdirs = glob(os.path.join(test_gtfolder, '*.jpg'))\n\ntrain_dirs.sort()\ntrain_gtdirs.sort()\ntest_dirs.sort()\ntest_gtdirs.sort()\n\n\nbatch_size = 8\nepochs = 2000\niterations = (len(train_dirs)//batch_size + 1) * epochs\nheight, width = 256, 256\n\n\nl_num = 2\nalpha_num = 1\nlam_lp = 1.0\nlam_gdl = 1.0\n\ntrial = 4\n\n\nsummary_dir = 'v_summary/trial_{}'.format(trial)\nif not os.path.exists(summary_dir):\n os.makedirs(summary_dir)\n\nsnapshot_dir = 'v_snapshot/trial_{}'.format(trial)\nif not os.path.exists(snapshot_dir):\n os.makedirs(snapshot_dir)\n \n\nlr_bounds = [7000]\nlr = [0.0001, 1e-05]\n\ntrain_inimg = np.zeros((len(train_dirs), height, width, 3), dtype=np.float32)\ntrain_gtimg = np.zeros((len(train_dirs), height, width, 3), dtype=np.float32)\ntest_inimg = np.zeros((len(test_dirs), height, width, 3), dtype=np.float32)\ntest_gtimg = np.zeros((len(test_dirs), height, width, 3), dtype=np.float32)\n\nfor i in range(len(train_inimg)):\n img = cv2.imread(train_dirs[i])\n img = cv2.resize(img, (height, width))\n train_inimg[i] = (img / 127.5) - 1.0\n \n img = cv2.imread(train_gtdirs[i])\n img = cv2.resize(img, (height, width))\n train_gtimg[i] = (img / 127.5) - 1.0\n \nfor i in range(len(test_inimg)):\n img = 
cv2.imread(test_dirs[i])\n img = cv2.resize(img, (height, width))\n test_inimg[i] = (img / 127.5) - 1.0\n \n img = cv2.imread(test_gtdirs[i])\n img = cv2.resize(img, (height, width))\n test_gtimg[i] = (img / 127.5) - 1.0\n \n# define dataset\nwith tf.name_scope('dataset'):\n train_dataset = tf.data.Dataset.from_tensor_slices(train_inimg).repeat().batch(batch_size)\n train_gtdataset = tf.data.Dataset.from_tensor_slices(train_gtimg).repeat().batch(batch_size)\n\n test_dataset = tf.data.Dataset.from_tensor_slices(test_inimg).repeat().batch(batch_size)\n test_gtdataset = tf.data.Dataset.from_tensor_slices(test_gtimg).repeat().batch(batch_size)\n \n \"\"\"train_dataset = tf.data.Dataset.from_tensor_slices(train_inimg).repeat(epochs)\n train_gtdataset = tf.data.Dataset.from_tensor_slices(train_gtimg).repeat(epochs)\n\n test_dataset = tf.data.Dataset.from_tensor_slices(test_inimg).repeat(epochs)\n test_gtdataset = tf.data.Dataset.from_tensor_slices(test_gtimg).repeat(epochs)\n \"\"\"\n \n \"\"\"train_dataset = tf.data.Dataset.from_tensor_slices(train_inimg)\n train_dataset.repeat()\n train_dataset.batch(batch_size)\n \n train_gtdataset = tf.data.Dataset.from_tensor_slices(train_gtimg)\n train_gtdataset.repeat()\n train_gtdataset.batch(batch_size)\n\n test_dataset = tf.data.Dataset.from_tensor_slices(test_inimg)\n test_dataset.repeat()\n test_dataset.batch(batch_size)\n \n test_gtdataset = tf.data.Dataset.from_tensor_slices(test_gtimg)\n test_gtdataset.repeat()\n test_gtdataset.batch(batch_size)\"\"\"\n \n \n train_it = train_dataset.make_one_shot_iterator()\n train_gtit = train_gtdataset.make_one_shot_iterator()\n\n test_it = test_dataset.make_one_shot_iterator()\n test_gtit = test_gtdataset.make_one_shot_iterator()\n\n\n train_inputs = train_it.get_next()\n train_gt = train_gtit.get_next()\n\n test_inputs = test_it.get_next()\n test_gt = test_gtit.get_next()\n \n\n# define training generator function\nwith tf.variable_scope('generator', reuse=None):\n print('training = {}'.format(tf.get_variable_scope().name))\n train_outputs = generator(train_inputs, layers=4, output_channel=3)\n train_psnr_error = psnr_error(gen_frames=train_outputs, gt_frames=train_gt)\n\n# define testing generator function\nwith tf.variable_scope('generator', reuse=True):\n print('testing = {}'.format(tf.get_variable_scope().name))\n test_outputs = generator(test_inputs, layers=4, output_channel=3)\n test_psnr_error = psnr_error(gen_frames=test_outputs, gt_frames=test_gt)\n\n\n# define intensity loss\nif lam_lp != 0:\n lp_loss = intensity_loss(gen_frames=train_outputs, gt_frames=train_gt, l_num=l_num)\nelse:\n lp_loss = tf.constant(0.0, dtype=tf.float32)\n\n\n# define gdl loss\nif lam_gdl != 0:\n gdl_loss = gradient_loss(gen_frames=train_outputs, gt_frames=train_gt, alpha=alpha_num)\nelse:\n gdl_loss = tf.constant(0.0, dtype=tf.float32)\n\n \nwith tf.name_scope('training'):\n g_loss = tf.add_n([lp_loss * lam_lp, gdl_loss * lam_gdl], name='g_loss')\n\n g_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='g_step')\n g_lrate = tf.train.piecewise_constant(g_step, boundaries=lr_bounds, values=lr)\n g_optimizer = tf.train.AdamOptimizer(learning_rate=g_lrate, name='g_optimizer')\n g_vars = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n\n g_train_op = g_optimizer.minimize(g_loss, global_step=g_step, var_list=g_vars, name='g_train_op')\n\n\n# add all to summaries\ntf.summary.scalar(tensor=train_psnr_error, name='train_psnr_error')\ntf.summary.scalar(tensor=test_psnr_error, 
name='test_psnr_error')\ntf.summary.scalar(tensor=g_loss, name='g_loss')\n#tf.summary.scalar(tensor=adv_loss, name='adv_loss')\n#tf.summary.scalar(tensor=dis_loss, name='dis_loss')\ntf.summary.image(tensor=train_outputs, name='train_outputs')\ntf.summary.image(tensor=train_gt, name='train_gt')\ntf.summary.image(tensor=test_outputs, name='test_outputs')\ntf.summary.image(tensor=test_gt, name='test_gt')\nsummary_op = tf.summary.merge_all()\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nwith tf.Session(config=config) as sess:\n # summaries\n summary_writer = tf.summary.FileWriter(summary_dir, graph=sess.graph)\n\n # initialize weights\n sess.run(tf.global_variables_initializer())\n print('Init successfully!')\n\n\n # tf saver\n saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=None)\n restore_var = [v for v in tf.global_variables()]\n loader = tf.train.Saver(var_list=restore_var)\n if os.path.isdir(snapshot_dir):\n ckpt = tf.train.get_checkpoint_state(snapshot_dir)\n if ckpt and ckpt.model_checkpoint_path:\n load(loader, sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found.')\n else:\n load(loader, sess, snapshot_dir)\n\n _step, _loss, _summaries = 0, None, None\n \n while _step < iterations:\n print('Training generator...')\n _, _g_lr, _step, _lp_loss, _gdl_loss, _g_loss, _train_psnr, _summaries = sess.run(\n [g_train_op, g_lrate, g_step, lp_loss, gdl_loss, g_loss, train_psnr_error, summary_op])\n\n if _step % 10 == 0:\n print('GeneratorModel : Step {}, lr = {:.6f}'.format(_step, _g_lr))\n print(' Global Loss : ', _g_loss)\n print(' intensity Loss : ({:.4f} * {:.4f} = {:.4f})'.format(_lp_loss, lam_lp, _lp_loss * lam_lp))\n print(' gradient Loss : ({:.4f} * {:.4f} = {:.4f})'.format( _gdl_loss, lam_gdl, _gdl_loss * lam_gdl))\n print(' PSNR Error : ', _train_psnr)\n \n if _step % 1000 == 0:\n summary_writer.add_summary(_summaries, global_step=_step)\n print('Save summaries...')\n\n if _step % 5000 == 0:\n save(saver, sess, snapshot_dir, _step)\n\n \n print('Finish successfully!')\n save(saver, sess, snapshot_dir, _step)\n","sub_path":"Codes/gw_unet_train.py","file_name":"gw_unet_train.py","file_ext":"py","file_size_in_byte":7957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"351674238","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\ndef compute_derivative(scan, min_dist):\n distance = []\n for measurement in scan:\n distance.append(measurement[1])\n jumps = [ 0 ]\n for i in range(1, len(distance) - 1):\n l = distance[i-1]\n r = distance[i+1]\n if l > min_dist and r > min_dist:\n derivative = (r - l) / 2.0\n jumps.append(derivative)\n else:\n jumps.append(0)\n jumps.append(0) \n return jumps\n\ndef find_cylinders(scan, scan_derivative, jump, min_dist, max_dist):\n distance = []\n angle = []\n for measurement in scan:\n distance.append(measurement[1])\n angle.append(measurement[0])\n cylinder_list = []\n on_cylinder = False\n sum_ray, sum_depth, rays, sum_angle = 0.0, 0.0, 0.0, 0.0\n #mass_dist = []\n #min_dist = 0\n\n for i in range(len(scan_derivative)):\n if scan_derivative[i] < -jump and distance[i] > min_dist and distance[i] < max_dist:\n sum_ray, sum_depth, rays, sum_angle = 0.0, 0.0, 0.0, 0.0\n #mass_dist = []\n #min_dist = 0\n on_cylinder = True\n\n if on_cylinder is True:\n sum_ray += i\n sum_depth += distance[i]\n sum_angle += angle[i]\n rays += 1\n #mass_dist.append(distance[i])\n\n if scan_derivative[i] > jump and distance[i] > min_dist 
and rays > 2 and rays < 7:\n                on_cylinder = False\n                average_depth = sum_depth / rays\n                average_angle = sum_angle / rays\n                average_ray = int(sum_ray / rays)\n                #min_dist = min(mass_dist)\n                #print(min_dist)\n\n                #cylinder_list.append((average_ray, distance[average_ray], angle[average_ray]))\n                cylinder_list.append((average_ray, average_depth, average_angle))\n                #cylinder_list.append((average_ray, min_dist, angle[average_ray]))\n\n    return cylinder_list\n\ndef plot_dist(scan, derivate, cylinders):\n    plt.clf()\n    distance = []\n    for measurement in scan:\n        distance.append(measurement[1])\n\n    plt.plot(distance, '-b')\n    plt.plot(derivate, '-b')\n    plt.scatter([c[0] for c in cylinders], [c[1] for c in cylinders],c='r', s=200)\n    plt.pause(0.01)\n\nif __name__ == '__main__':\n    first = True\n    points_cloud = np.array([])\n    file = open(\"log.txt\", \"r\")\n    robot = {'time':[],'vr':[], 'vl':[],'x':[],'y':[],'yaw':[],'scan':[]}\n    for line in file:\n        line = line.split(' ')\n        robot['time'].append(float(line[0]))\n        robot['vr'].append(float(line[1]))\n        robot['vl'].append(float(line[2]))\n        robot['x'].append(float(line[3]))\n        robot['y'].append(float(line[4]))\n        robot['yaw'].append(float(line[5]))\n        scan = []\n        mass_dist = []\n        mass_angle = []\n        for i, var in enumerate(line[6:]):  # scan values start after the six pose/odometry fields (line[5] is yaw)\n            angle = i*3.14/180\n            dist = float(var)/1000\n            if dist != 0:\n                mass_dist.append(dist)\n                mass_angle.append(angle)\n                scan.append([angle, dist])\n        robot['scan'].append(scan)\n        min_dist = 0.05\n        max_dist = 2\n        depth_jump = 0.5\n        derivative = compute_derivative(scan, 0.05)\n        cylinders = find_cylinders(scan, derivative, depth_jump, min_dist, max_dist)\n        #plot_dist(scan, derivative, cylinders)\n\n\n    for i in range(len(robot['time'])):\n        plt.plot(robot['x'][i], robot['y'][i],'.g')\n        plt.pause(0.001)\n##        plt.clf()\n##        plt.plot(mass_dist, '-b')\n##        plt.pause(0.001)\n    \n","sub_path":"new/read_log.py","file_name":"read_log.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"271655576","text":"import time\nfrom collections import defaultdict\nfrom bisect import insort\n\nfrom aalpy.automata import MarkovChain, MdpState, Mdp, McState\nfrom aalpy.learning_algs.stochastic_passive.CompatibilityChecker import HoeffdingCompatibility\nfrom aalpy.learning_algs.stochastic_passive.FPTA import create_fpta\n\n\nclass Alergia:\n    def __init__(self, data, is_mdp=False, eps=0.005, compatibility_checker=None, print_info=False):\n        assert eps == 'auto' or 0 < eps <= 2\n\n        self.is_mdp = is_mdp\n        self.print_info = print_info\n\n        if eps == 'auto':\n            eps = 10 / sum(len(d)-1 for d in data)  # len - 1 to ignore initial output\n\n        self.diff_checker = HoeffdingCompatibility(eps) if not compatibility_checker else compatibility_checker\n\n        pta_start = time.time()\n\n        self.t, self.a = create_fpta(data, is_mdp)\n\n        pta_time = round(time.time() - pta_start, 2)\n        if self.print_info:\n            print(f'PTA Construction Time: {pta_time}')\n\n    def compatibility_test(self, a, b):\n        if a.output != b.output:\n            return False\n\n        if not a.children.values() or not b.children.values():\n            return True\n\n        if not self.diff_checker.check_difference(a, b):\n            return False\n\n        for el in set(a.children.keys()).intersection(b.children.keys()):\n            if not self.compatibility_test(a.children[el], b.children[el]):\n                return False\n\n        return True\n\n    def merge(self, q_r, q_b):\n        t_q_b = self.get_blue_node(q_b)\n        prefix_leading_to_state = q_b.prefix[:-1]\n        to_update = self.a\n        for p in prefix_leading_to_state:\n            to_update = 
to_update.children[p]\n\n to_update.children[q_b.prefix[-1]] = q_r\n\n self.fold(q_r, t_q_b)\n\n def fold(self, q_r, q_b):\n for i, c in q_b.children.items():\n if i in q_r.children.keys():\n q_r.input_frequency[i] += c.input_frequency[i]\n self.fold(q_r.children[i], c)\n else:\n q_r.children[i] = c.copy()\n q_r.input_frequency[i] = q_b.input_frequency[i] # Todo, Martin please examine if this is correct,\n # without it you would get an error later on as child would exist without associated input freq\n\n def run(self):\n start_time = time.time()\n\n red = [self.a] # representative nodes and will be included in the final output model\n blue = self.a.succs() # intermediate successors scheduled for testing\n\n while blue:\n lex_min_blue = min(list(blue), key=lambda x: len(x.prefix))\n merged = False\n\n for q_r in red:\n if self.compatibility_test(self.get_blue_node(q_r), self.get_blue_node(lex_min_blue)):\n self.merge(q_r, lex_min_blue)\n merged = True\n break\n\n if not merged:\n insort(red, lex_min_blue)\n\n blue.clear()\n prefixes_in_red = [s.prefix for s in red]\n for r in red:\n for s in r.succs():\n if s.prefix not in prefixes_in_red:\n blue.append(s)\n\n assert sorted(red, key=lambda x: len(x.prefix)) == red\n\n self.normalize(red)\n\n for i, r in enumerate(red):\n r.state_id = f'q{i}'\n\n if self.print_info:\n print(f'Alergia Learning Time: {round(time.time() - start_time, 2)}')\n print(f'Alergia Learned {len(red)} state automaton.')\n return self.to_automaton(red)\n\n def normalize(self, red):\n red_sorted = sorted(list(red), key=lambda x: len(x.prefix))\n for r in red_sorted:\n if not self.is_mdp:\n total_output = sum(r.input_frequency.values())\n for i in r.input_frequency.keys():\n r.children_prob[i] = r.input_frequency[i] / total_output\n else:\n outputs_per_input = defaultdict(int)\n for io, freq in r.input_frequency.items():\n outputs_per_input[io[0]] += freq\n for io in r.input_frequency.keys():\n r.children_prob[io] = r.input_frequency[io] / outputs_per_input[io[0]]\n\n def get_blue_node(self, red_node):\n blue = self.t\n for p in red_node.prefix:\n blue = blue.children[p]\n return blue\n\n def to_automaton(self, red):\n s_c = MdpState if self.is_mdp else McState\n a_c = Mdp if self.is_mdp else MarkovChain\n\n states = []\n initial_state = None\n red_mdp_map = dict()\n for s in red:\n automaton_state = s_c(s.state_id, output=s.output)\n automaton_state.prefix = s.prefix\n states.append(automaton_state)\n red_mdp_map[tuple(s.prefix)] = automaton_state\n red_mdp_map[automaton_state.state_id] = s\n if not s.prefix:\n initial_state = automaton_state\n\n for s in states:\n red_eq = red_mdp_map[s.state_id]\n for io, c in red_eq.children.items():\n destination = red_mdp_map[tuple(c.prefix)]\n i = io[0] if self.is_mdp else io\n if self.is_mdp:\n s.transitions[i].append((destination, red_eq.children_prob[io]))\n else:\n if i not in red_eq.children_prob.keys():\n print('')\n s.transitions.append((destination, red_eq.children_prob[i]))\n\n return a_c(initial_state, states)\n\n\ndef run_Alergia(data, automaton_type, eps=0.005, compatibility_checker=None, print_info=False):\n \"\"\"\n Run Alergia or IOAlergia on provided data.\n\n Args:\n\n data: data either in a form [[I,I,I],[I,I,I],...] if learning Markov Chains or [[O,(I,O),(I,O)...],\n [O,(I,O_,...],..,] if learning MDPs (I represents input, O output).\n Note that in whole data first symbol of each entry should be the same (Initial output of the MDP/MC).\n\n eps: epsilon value if you are using default HoeffdingCompatibility. 
If it is set to 'auto' it will be computed\n        as 10/(all steps in the data)\n\n        automaton_type: either 'mdp' if you wish to learn an MDP, else 'mc' if you want to learn Markov Chain\n\n        compatibility_checker: impl. of class CompatibilityChecker, HoeffdingCompatibility with eps value by default\n\n        (note: not interchangeable, depends on data)\n        print_info: print learning progress information\n\n    Returns:\n\n        mdp or markov chain\n    \"\"\"\n    assert automaton_type in {'mdp', 'mc'}\n    alergia = Alergia(data, eps=eps, is_mdp=True if automaton_type == 'mdp' else False,\n                      compatibility_checker=compatibility_checker, print_info=print_info)\n    model = alergia.run()\n    return model\n","sub_path":"aalpy/learning_algs/stochastic_passive/Alergia.py","file_name":"Alergia.py","file_ext":"py","file_size_in_byte":6784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"40726477","text":"import os\nimport re\n\nimport pandas as pd\nimport twitter\nimport mojimoji\nimport neologdn\n\n\nimport global_function as global_func\n\n\ndef build_api(token_path):\n    token_config = global_func.load_config(token_path)\n\n    \"\"\"Load the access tokens.\"\"\"\n    ConsumerKey = token_config['ConsumerKey']\n    ConsumerSecret = token_config['ConsumerSecret']\n    AccessToken = token_config['AccessToken']\n    AccessTokenSecret = token_config['AccessTokenSecret']\n\n    api = twitter.Twitter(auth=twitter.OAuth(AccessToken, AccessTokenSecret, ConsumerKey, ConsumerSecret), retry=True)\n    return api\n\n\nclass TweetsGetter(object):\n    \"\"\"Collect tweets from twitter (base class).\n\n    # Arguments\n        token_path: path of the twitter api token config\n        max_count: number of tweets requested per api call\n        ## set_default_params\n            sets the keys used to map the API response into the csv output\n\n    # How to use\n        getter = TweetsGetter(token_path, csv_dir)\n        getter.set_csv_dir('data/text_data/crawler/nega')\n        getter.set_info()  # changed per subclass\n        getter.get_tweets()\n        ==> saves the collected replies to csv_dir/'{}.csv'.format(root_tweet_id)\n\n    \"\"\"\n    def __init__(self, token_path, max_count=100):\n        self._set_default_params()\n        self.api = build_api(token_path)\n        self.max_count = max_count\n\n    def _set_default_params(self):\n        self.key_list = ['created_at', 'id', 'screen_name', 'text']\n        self.user_key_list = ['screen_name']\n\n    def set_csv_dir(self, csv_dir):\n        os.makedirs(csv_dir, exist_ok=True)\n        self.csv_dir = csv_dir\n\n    def get_tweets(self):\n        \"\"\"\n        Subclasses should override for any actions to run.\n        \"\"\"\n\n        self.df = pd.DataFrame()\n        tweets = []\n        \"\"\"\n        Fetch the data via self.api.\n        \"\"\"\n        self._write_tweets_df(tweets)\n        self.df.to_csv(self.csv_path, index=False)\n        del self.df\n\n    def _write_tweets_df(self, tweets):\n        tweets_norm = [self._extract_data(tweet) for tweet in tweets]\n        for tweet in tweets_norm:\n            text = self._text_norm(tweet[-1])\n            if text is None:\n                continue\n            tweet[-1] = text\n            tmp = pd.DataFrame(tweet, self.key_list)\n            self.df = self.df.append(tmp.T)\n\n    def _extract_data(self, tweet):\n        output_info = []\n        for key in self.key_list:\n            value = tweet['user'][key] if key in self.user_key_list else tweet[key]\n            output_info.append(value)\n        return output_info\n\n    def _text_norm(self, text):\n        \"\"\"\n        Override as needed.\n        \"\"\"\n        text = text.lower()\n\n        #text = neologdn.normalize(text, repeat=2)\n        # alphabet/symbols (full-width -> half-width), kana (half-width -> full-width), digits (full-width -> half-width)\n        #text = mojimoji.zen_to_han(text, kana=False)\n\n        # remove text enclosed in (), 【】, -- or 『』\n        text = re.sub('\\(.*\\)', '', text)\n        text = re.sub('\\【.*\\】', '', text)\n        text = re.sub('\\-.*\\-', '', text)\n        text = re.sub('\\『.*\\』', '', text)\n\n        # remove text enclosed in ~~\n        text = re.sub('\\~.*\\~', '', text)\n        # remove line breaks\n        text = re.sub('\\n', '', 
text)\n\n        # drop specific kinds of tweets\n        # RT\n        if text[:2] == 'rt':\n            return None\n\n        # URL\n        text = re.sub(r'https?://[\\w/:%#\\$&\\?\\(\\)~\\.=\\+\\-…]+', \"\", text)\n        # user names\n        text = re.sub(r'@[\\w/:%#\\$&\\?\\(\\)~\\.=\\+\\-…]+', \"\", text)\n        # hashtags\n        text = re.sub(r'#[\\w/:%#\\$&\\?\\(\\)~\\.=\\+\\-…]+', \"\", text)\n        # remove characters outside the supported unicode range\n        symbol = re.sub(r'[\\u0000-\\uE0FFF]', \"\", text)\n        if not symbol == \"\":\n            text = re.sub(\"[%s]\" % symbol, \"\", text)\n\n        # remove surrounding whitespace\n        text = text.strip()\n        text = neologdn.normalize(text, repeat=2)\n        return text\n\n\nclass UserTweetsGetter(TweetsGetter):\n    \"\"\"Collect the tweets of a specific user name (screen_name).\n\n    # Arguments\n        token_path: path of the twitter api token config\n        csv_dir: where the csv is saved (from the second run on, the path of that csv's parent directory)\n        max_count: number of tweets requested per api call\n        ## set_default_params\n            sets the keys used to map the API response into the csv output\n\n    # How to use\n        getter = UserTweetsGetter(token_path, csv_dir)\n        getter.set_csv_dir('data/text_data/crawler/nega')\n        getter.set_user('peachgan_r6s')\n        getter.get_tweets()\n        ==> saves the collected tweets to csv_dir/'{}.csv'.format(user_name)\n\n    \"\"\"\n    def __init__(self, token_path, max_count=100):\n        super(UserTweetsGetter, self).__init__(token_path, max_count)\n\n    def set_user(self, screen_name):\n        self.screen_name = screen_name\n\n        self.csv_path = os.path.join(self.csv_dir, '{}.csv'.format(screen_name))\n        if os.path.isfile(self.csv_path):  # resume twitter crawler\n            self.base_df = pd.read_csv(self.csv_path)\n            self.since_id = self.base_df['id'].iat[0]\n            self.resume_flag = True\n        else:  # first crawler\n            self.since_id = None\n            self.resume_flag = False\n\n    def get_tweets(self):\n        self.df = pd.DataFrame()\n        max_id = None\n        while True:\n            min_id, tweets_num = self._get_tweets_core(max_id)\n            max_id = min_id - 1\n            if tweets_num < self.max_count:\n                break\n\n        if self.resume_flag:\n            self.df = pd.concat([self.df, self.base_df])\n\n        self.df = self.df.dropna(subset=['text'])\n        self.df = self.df[self.df['text'] != '']\n        self.df.to_csv(self.csv_path, index=False)\n        del self.df\n\n    def _get_tweets_core(self, max_id=None):\n        tweets = self._get_user_timeline(max_id)\n        tweets_num = len(tweets)\n        if tweets_num == 0:\n            min_id = 0\n        else:\n            min_id = tweets[-1]['id']\n            self._write_tweets_df(tweets)\n\n        return min_id, tweets_num\n\n    def _get_user_timeline(self, max_id=None):\n        # reference ()\n        if self.since_id is None:  # first search for this screen_name\n            if max_id is None:  # first request\n                tweets = self.api.statuses.user_timeline(screen_name=self.screen_name,\n                                                         count=self.max_count)\n            else:  # later requests\n                tweets = self.api.statuses.user_timeline(screen_name=self.screen_name,\n                                                         max_id=max_id, count=self.max_count)\n        else:  # second and later searches for this screen_name\n            if max_id is None:  # first request\n                tweets = self.api.statuses.user_timeline(screen_name=self.screen_name,\n                                                         since_id=self.since_id,\n                                                         count=self.max_count)\n            else:\n                tweets = self.api.statuses.user_timeline(screen_name=self.screen_name,\n                                                         since_id=self.since_id,\n                                                         max_id=max_id,\n                                                         count=self.max_count)\n        return tweets\n\n    def _text_norm(self, text):\n        text = text.lower()\n\n        # alphabet/symbols (full-width -> half-width), kana (half-width -> full-width), digits (full-width -> half-width)\n        #text = mojimoji.zen_to_han(text, kana=False)\n\n        # remove text enclosed in (), 【】, -- or 『』\n        text = re.sub('\\(.*\\)', '', text)\n        text = re.sub('\\【.*\\】', '', text)\n        text = re.sub('\\-.*\\-', '', text)\n        text = re.sub('\\『.*\\』', '', text)\n\n        # remove text enclosed in ~~\n        text = re.sub('\\~.*\\~', '', text)\n        # remove line breaks\n        text = re.sub('\\n', '', text)\n\n        # drop specific kinds of tweets\n        # RT\n        if text[:2] == 'rt':\n            return None\n        # replies\n        if text.startswith('@'):\n            return None\n\n        # URL\n        text = 
re.sub(r'https?://[\\w/:%#\\$&\\?\\(\\)~\\.=\\+\\-…]+', \"\", text)\n        # user names\n        text = re.sub(r'@[\\w/:%#\\$&\\?\\(\\)~\\.=\\+\\-…]+', \"\", text)\n        # hashtags\n        text = re.sub(r'#[\\w/:%#\\$&\\?\\(\\)~\\.=\\+\\-…]+', \"\", text)\n        # remove characters outside the supported unicode range\n        symbol = re.sub(r'[\\u0000-\\uE0FFF]', \"\", text)\n        if not symbol == \"\":\n            text = re.sub(\"[%s]\" % symbol, \"\", text)\n\n        # remove surrounding whitespace\n        text = text.strip()\n        text = neologdn.normalize(text, repeat=2)\n        return text\n\n\nclass ReplyTweetsGetter(TweetsGetter):\n    \"\"\"Collect the replies to a specific tweet.\n\n    # Arguments\n        token_path: path of the twitter api token config\n        max_count: number of tweets requested per api call\n        ## set_default_params\n            sets the keys used to map the API response into the csv output\n\n    # How to use\n        getter = ReplyTweetsGetter(token_path, csv_dir)\n        getter.set_csv_dir('data/text_data/crawler/nega')\n        getter.set_root_tweet(1115797923499892736)\n        getter.get_tweets()\n        ==> saves the collected replies to csv_dir/'{}.csv'.format(root_tweet_id)\n\n    \"\"\"\n    def __init__(self, token_path, max_count=100):\n        super(ReplyTweetsGetter, self).__init__(token_path, max_count)\n\n    def set_root_tweet(self, tweet_id):\n        self.root_tweet_id = tweet_id\n        self.csv_path = os.path.join(self.csv_dir, '{}.csv'.format(self.root_tweet_id))\n\n    def get_tweets(self):\n        self.df = pd.DataFrame()\n        root_tweet = self.api.statuses.show(id=self.root_tweet_id)\n        reply_list = self._get_replys(root_tweet)\n        if len(reply_list) == 0:\n            return\n\n        self._write_tweets_df(reply_list)\n        for reply in reply_list:\n            reply_reply_list = self._get_replys(reply)\n            if len(reply_reply_list) == 0:\n                continue\n            self._write_tweets_df(reply_reply_list)\n\n        self.df = self.df.dropna(subset=['text'])\n        self.df = self.df[self.df['text'] != '']\n        self.df.to_csv(self.csv_path, index=False)\n        del self.df\n\n    def _get_replys(self, tweet):\n        reply_list = []\n        query = \"to:\" + tweet['user']['screen_name']\n        tweet_id = tweet['id']\n        max_id = None\n        while True:\n            response_list = self._get_reply_core(query, tweet_id, max_id)\n            if len(response_list) == 0:\n                break\n            for response in response_list:\n                if response['in_reply_to_status_id'] == tweet_id:\n                    reply_list.append(response)\n            if len(response_list) < self.max_count:\n                break\n            max_id = response_list[-1]['id'] - 1\n\n        return reply_list\n\n    def _get_reply_core(self, query, since_id, max_id=None):\n        if max_id is None:  # first request\n            tweets = self.api.search.tweets(q=query,\n                                            since_id=since_id,\n                                            count=self.max_count)['statuses']\n        else:  # later requests\n            tweets = self.api.search.tweets(q=query,\n                                            since_id=since_id,\n                                            max_id=max_id,\n                                            count=self.max_count)['statuses']\n\n        return tweets\n","sub_path":"src/utils/api_function.py","file_name":"api_function.py","file_ext":"py","file_size_in_byte":11615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"416210182","text":"\n\nimport os\nimport os.path\nimport numpy as np\nimport random\n\nclass ImageClass():\n    \"Stores the paths to images for a given class\"\n\n    def __init__(self, name, image_paths):\n        self.name = name\n        self.image_paths = image_paths\n\n    def __str__(self):\n        return self.name + ', ' + str(len(self.image_paths)) + ' images'\n\n    def __len__(self):\n        return len(self.image_paths)\n\n\ndef get_image_paths(facedir):\n    image_paths = []\n    if os.path.isdir(facedir):\n        images = os.listdir(facedir)\n        image_paths = [os.path.join(facedir,img) for img in images]\n    return image_paths\n\n\ndef get_dataset(paths, has_class_directories=True):\n    dataset = []\n    for path in paths.split(':'):\n        path_exp = os.path.expanduser(path)\n        classes = 
os.listdir(path_exp)\n classes.sort()\n nrof_classes = len(classes)\n for i in range(nrof_classes):\n class_name = classes[i]\n facedir = os.path.join(path_exp, class_name)\n image_paths = get_image_paths(facedir)\n dataset.append(ImageClass(class_name, image_paths))\n\n return dataset\n\n\ndef split_dataset(dataset, split_ratio, mode):\n if mode=='SPLIT_CLASSES':\n nrof_classes = len(dataset)\n class_indices = np.arange(nrof_classes)\n np.random.shuffle(class_indices)\n split = int(round(nrof_classes*split_ratio))\n train_set = [dataset[i] for i in class_indices[0:split]]\n test_set = [dataset[i] for i in class_indices[split:-1]]\n elif mode=='SPLIT_IMAGES':\n train_set = []\n test_set = []\n min_nrof_images = 2\n for cls in dataset:\n paths = cls.image_paths\n np.random.shuffle(paths)\n split = int(round(len(paths)*split_ratio))\n if split> Running simplebilty <<\", file=sys.stderr)\n if args.model:\n print(\"loading model from file {}\".format(args.model), file=sys.stderr)\n tagger = load(args)\n else:\n tagger = SimpleBiltyTagger(args.in_dim,\n args.h_dim,\n args.c_in_dim,\n args.h_layers,\n embeds_file=args.embeds,\n activation=args.ac,\n noise_sigma=args.sigma)\n\n if args.train:\n ## read data\n train_X, train_Y = tagger.get_train_data(args.train)\n\n\n if args.dev:\n dev_X, dev_Y = tagger.get_data_as_indices(args.dev)\n\n tagger.fit(train_X, train_Y, args.iters, args.trainer, learning_rate=args.learning_rate, seed=args.dynet_seed, word_dropout_rate=args.word_dropout_rate)\n if args.save:\n save(tagger, args)\n\n if args.test:\n stdout = sys.stdout\n\n sys.stderr.write('\\nTesting\\n')\n sys.stderr.write('*******\\n')\n test_X, test_Y = tagger.get_data_as_indices(args.test)\n correct, total = tagger.evaluate(test_X, test_Y)\n\n print(\"\\ntest accuracy: %.4f\" % (correct/total), file=sys.stderr)\n print((\"Done. 
Took {0:.2f} seconds.\".format(time.time()-start)),file=sys.stderr)\n sys.stdout = stdout\n\n if args.ac:\n activation=args.ac.__name__\n else:\n activation=\"None\"\n print(\"Info: biLSTM\\n\\t\" + \"\\n\\t\".join([\"{}: {}\".format(a, v) for a, v in vars(args).items()\n if a not in [\"train\", \"test\", \"dev\", \"pred_layer\"]]), file=sys.stderr)\n\n if args.save_embeds:\n tagger.save_embeds(args.save_embeds)\n\ndef load(args):\n \"\"\"\n load a model from file; specify the .model file, it assumes the *pickle file in the same location\n \"\"\"\n myparams = pickle.load(open(args.model+\".pickle\", \"rb\"))\n tagger = SimpleBiltyTagger(myparams[\"in_dim\"],\n myparams[\"h_dim\"],\n myparams[\"c_in_dim\"],\n myparams[\"h_layers\"],\n activation=myparams[\"activation\"])\n tagger.set_indices(myparams[\"w2i\"],myparams[\"c2i\"],myparams[\"tag2idx\"])\n tagger.predictors, tagger.char_rnn, tagger.wembeds, tagger.cembeds = \\\n tagger.build_computation_graph(myparams[\"num_words\"],\n myparams[\"num_chars\"])\n tagger.model.populate(args.model)\n print(\"model loaded: {}\".format(args.model), file=sys.stderr)\n return tagger\n\ndef save(nntagger, args):\n \"\"\"\n save a model; dynet only saves the parameters, need to store the rest separately\n \"\"\"\n outdir = args.save\n modelname = outdir + \".model\"\n nntagger.model.save(modelname)\n import pickle\n myparams = {\"num_words\": len(nntagger.w2i),\n \"num_chars\": len(nntagger.c2i),\n \"w2i\": nntagger.w2i,\n \"c2i\": nntagger.c2i,\n \"tag2idx\": nntagger.tag2idx,\n \"activation\": nntagger.activation,\n \"in_dim\": nntagger.in_dim,\n \"h_dim\": nntagger.h_dim,\n \"c_in_dim\": nntagger.c_in_dim,\n \"h_layers\": nntagger.h_layers\n }\n pickle.dump(myparams, open( modelname+\".pickle\", \"wb\" ) )\n print(\"model stored: {}\".format(modelname), file=sys.stderr)\n\n\nclass SimpleBiltyTagger(object):\n\n def __init__(self,in_dim,h_dim,c_in_dim,h_layers,embeds_file=None,activation=dynet.tanh, noise_sigma=0.1):\n self.w2i = {} # word to index mapping\n self.c2i = {} # char to index mapping\n self.tag2idx = {} # tag to tag_id mapping\n self.model = dynet.ParameterCollection() #init model\n self.in_dim = in_dim\n self.h_dim = h_dim\n self.c_in_dim = c_in_dim\n self.activation = activation\n self.noise_sigma = noise_sigma\n self.h_layers = h_layers\n self.predictors = {\"inner\": [], \"output_layers_dict\": {}, \"task_expected_at\": {} } # the inner layers and predictors\n self.wembeds = None # lookup: embeddings for words\n self.cembeds = None # lookup: embeddings for characters\n self.embeds_file = embeds_file\n self.char_rnn = None # RNN for character input\n\n\n def pick_neg_log(self, pred, gold):\n return -dynet.log(dynet.pick(pred, gold))\n\n def set_indices(self, w2i, c2i, tag2idx):\n self.tag2idx= tag2idx\n self.w2i = w2i\n self.c2i = c2i\n\n def fit(self, train_X, train_Y, num_iterations, train_algo, learning_rate=0, seed=None, word_dropout_rate=0.25):\n \"\"\"\n train the tagger\n \"\"\"\n print(\"read training data\",file=sys.stderr)\n\n if seed:\n print(\">>> using seed: \", seed, file=sys.stderr)\n random.seed(seed) #setting random seed\n\n # init lookup parameters and define graph\n print(\"build graph\",file=sys.stderr)\n \n num_words = len(self.w2i)\n num_chars = len(self.c2i)\n \n self.predictors, self.char_rnn, self.wembeds, self.cembeds = self.build_computation_graph(num_words, num_chars)\n\n training_algo = TRAINER_MAP[train_algo]\n\n if learning_rate > 0:\n trainer = training_algo(self.model, learning_rate=learning_rate)\n 
else:\n trainer = training_algo(self.model)\n\n # if we use word dropout keep track of counts\n if word_dropout_rate > 0.0:\n widCount = Counter()\n for sentence, _ in train_X:\n widCount.update([w for w in sentence])\n\n assert(len(train_X)==len(train_Y))\n train_data = list(zip(train_X,train_Y))\n\n for cur_iter in range(num_iterations):\n total_loss=0.0\n total_tagged=0.0\n random.shuffle(train_data)\n for ((word_indices,char_indices),y) in train_data:\n if word_dropout_rate > 0.0:\n word_indices = [self.w2i[\"_UNK\"] if\n (random.random() > (widCount.get(w)/(word_dropout_rate+widCount.get(w))))\n else w for w in word_indices]\n output = self.predict(word_indices, char_indices, train=True)\n\n loss1 = dynet.esum([self.pick_neg_log(pred,gold) for pred, gold in zip(output, y)])\n lv = loss1.value()\n total_loss += lv\n total_tagged += len(word_indices)\n\n loss1.backward()\n trainer.update()\n\n print(\"iter {2} {0:>12}: {1:.2f}\".format(\"total loss\",total_loss/total_tagged,cur_iter), file=sys.stderr)\n\n def build_computation_graph(self, num_words, num_chars):\n \"\"\"\n build graph and link to parameters\n \"\"\"\n # initialize the word embeddings and the parameters\n cembeds = None\n if self.embeds_file:\n print(\"loading embeddings\", file=sys.stderr)\n embeddings, emb_dim = load_embeddings_file(self.embeds_file)\n assert(emb_dim==self.in_dim)\n num_words=len(set(embeddings.keys()).union(set(self.w2i.keys()))) # initialize all with embeddings\n # init model parameters and initialize them\n wembeds = self.model.add_lookup_parameters((num_words, self.in_dim),init=dynet.ConstInitializer(0.01))\n\n if self.c_in_dim > 0:\n cembeds = self.model.add_lookup_parameters((num_chars, self.c_in_dim),init=dynet.ConstInitializer(0.01))\n \n init=0\n l = len(embeddings.keys())\n for word in embeddings.keys():\n # for those words we have already in w2i, update vector, otherwise add to w2i (since we keep data as integers)\n if word in self.w2i:\n wembeds.init_row(self.w2i[word], embeddings[word])\n else:\n self.w2i[word]=len(self.w2i.keys()) # add new word\n wembeds.init_row(self.w2i[word], embeddings[word])\n init+=1\n print(\"initialized: {}\".format(init), file=sys.stderr)\n\n else:\n wembeds = self.model.add_lookup_parameters((num_words, self.in_dim),init=dynet.ConstInitializer(0.01))\n if self.c_in_dim > 0:\n cembeds = self.model.add_lookup_parameters((num_chars, self.c_in_dim),init=dynet.ConstInitializer(0.01))\n\n #make it more flexible to add number of layers as specified by parameter\n layers = [] # inner layers\n\n for layer_num in range(0,self.h_layers):\n\n if layer_num == 0:\n if self.c_in_dim > 0:\n f_builder = dynet.CoupledLSTMBuilder(1, self.in_dim+self.c_in_dim*2, self.h_dim, self.model) # in_dim: size of each layer\n b_builder = dynet.CoupledLSTMBuilder(1, self.in_dim+self.c_in_dim*2, self.h_dim, self.model) \n else:\n f_builder = dynet.CoupledLSTMBuilder(1, self.in_dim, self.h_dim, self.model)\n b_builder = dynet.CoupledLSTMBuilder(1, self.in_dim, self.h_dim, self.model)\n layers.append(BiRNNSequencePredictor(f_builder, b_builder)) #returns forward and backward sequence\n else:\n # add inner layers (if h_layers >1)\n f_builder = dynet.LSTMBuilder(1, self.h_dim, self.h_dim, self.model)\n b_builder = dynet.LSTMBuilder(1, self.h_dim, self.h_dim, self.model)\n layers.append(BiRNNSequencePredictor(f_builder,b_builder))\n\n # store at which layer to predict task\n\n task_num_labels= len(self.tag2idx)\n output_layer = FFSequencePredictor(Layer(self.model, self.h_dim*2, task_num_labels, 
dynet.softmax))\n\n if self.c_in_dim > 0:\n char_rnn = BiRNNSequencePredictor(dynet.CoupledLSTMBuilder(1, self.c_in_dim, self.c_in_dim, self.model), dynet.CoupledLSTMBuilder(1, self.c_in_dim, self.c_in_dim, self.model))\n else:\n char_rnn = None\n\n predictors = {}\n predictors[\"inner\"] = layers\n predictors[\"output_layers_dict\"] = output_layer\n predictors[\"task_expected_at\"] = self.h_layers\n\n return predictors, char_rnn, wembeds, cembeds\n\n def get_features(self, words):\n \"\"\"\n from a list of words, return the word and word char indices\n \"\"\"\n word_indices = []\n word_char_indices = []\n for word in words:\n if word in self.w2i:\n word_indices.append(self.w2i[word])\n else:\n word_indices.append(self.w2i[\"_UNK\"])\n\n if self.c_in_dim > 0:\n chars_of_word = [self.c2i[\"\"]]\n for char in word:\n if char in self.c2i:\n chars_of_word.append(self.c2i[char])\n else:\n chars_of_word.append(self.c2i[\"_UNK\"])\n chars_of_word.append(self.c2i[\"\"])\n word_char_indices.append(chars_of_word)\n return word_indices, word_char_indices\n \n\n def get_data_as_indices(self, file_name):\n \"\"\"\n X = list of (word_indices, word_char_indices)\n Y = list of tag indices\n \"\"\"\n X, Y = [],[]\n org_X, org_Y = [], []\n\n for (words, tags) in read_conll_file(file_name):\n word_indices, word_char_indices = self.get_features(words)\n tag_indices = [self.tag2idx.get(tag) for tag in tags]\n X.append((word_indices,word_char_indices))\n Y.append(tag_indices)\n org_X.append(words)\n org_Y.append(tags)\n return X, Y #, org_X, org_Y - for now don't use\n\n\n def predict(self, word_indices, char_indices, train=False):\n \"\"\"\n predict tags for a sentence represented as char+word embeddings\n \"\"\"\n dynet.renew_cg() # new graph\n\n char_emb = []\n rev_char_emb = []\n\n wfeatures = [self.wembeds[w] for w in word_indices]\n\n if self.c_in_dim > 0:\n # get representation for words\n for chars_of_token in char_indices:\n char_feats = [self.cembeds[c] for c in chars_of_token]\n # use last state as word representation\n f_char, b_char = self.char_rnn.predict_sequence(char_feats, char_feats)\n last_state = f_char[-1]\n rev_last_state = b_char[-1]\n char_emb.append(last_state)\n rev_char_emb.append(rev_last_state)\n\n features = [dynet.concatenate([w,c,rev_c]) for w,c,rev_c in zip(wfeatures,char_emb,rev_char_emb)]\n else:\n features = wfeatures\n \n if train: # only do at training time\n features = [dynet.noise(fe,self.noise_sigma) for fe in features]\n\n output_expected_at_layer = self.h_layers\n output_expected_at_layer -=1\n\n # go through layers\n # input is now combination of w + char emb\n prev = features\n prev_rev = features\n num_layers = self.h_layers\n for i in range(0,num_layers):\n predictor = self.predictors[\"inner\"][i]\n forward_sequence, backward_sequence = predictor.predict_sequence(prev, prev_rev)\n if i > 0 and self.activation:\n # activation between LSTM layers\n forward_sequence = [self.activation(s) for s in forward_sequence]\n backward_sequence = [self.activation(s) for s in backward_sequence]\n\n if i == output_expected_at_layer:\n output_predictor = self.predictors[\"output_layers_dict\"]\n concat_layer = [dynet.concatenate([f, b]) for f, b in zip(forward_sequence,reversed(backward_sequence))]\n\n if train and self.noise_sigma > 0.0:\n concat_layer = [dynet.noise(fe,self.noise_sigma) for fe in concat_layer]\n output = output_predictor.predict_sequence(concat_layer)\n return output\n\n prev = forward_sequence\n prev_rev = backward_sequence\n\n raise Exception(\"oops should not 
be here\")\n return None\n\n def evaluate(self, test_X, test_Y):\n \"\"\"\n compute accuracy on a test file\n \"\"\"\n correct = 0\n total = 0.0\n\n for i, ((word_indices, word_char_indices), gold_tag_indices) in enumerate(zip(test_X, test_Y)):\n\n output = self.predict(word_indices, word_char_indices)\n predicted_tag_indices = [np.argmax(o.value()) for o in output] \n\n correct += sum([1 for (predicted, gold) in zip(predicted_tag_indices, gold_tag_indices) if predicted == gold])\n total += len(gold_tag_indices)\n\n return correct, total\n\n\n\n def get_train_data(self, train_data):\n \"\"\"\n transform training data to features (word indices)\n map tags to integers\n \"\"\"\n X = []\n Y = []\n\n # word 2 indices and tag 2 indices\n w2i = {} # word to index\n c2i = {} # char to index\n tag2idx = {} # tag2idx\n\n w2i[\"_UNK\"] = 0 # unk word / OOV\n c2i[\"_UNK\"] = 0 # unk char\n c2i[\"\"] = 1 # word start\n c2i[\"\"] = 2 # word end index\n \n \n num_sentences=0\n num_tokens=0\n for instance_idx, (words, tags) in enumerate(read_conll_file(train_data)):\n instance_word_indices = [] #sequence of word indices\n instance_char_indices = [] #sequence of char indices\n instance_tags_indices = [] #sequence of tag indices\n\n for i, (word, tag) in enumerate(zip(words, tags)):\n\n # map words and tags to indices\n if word not in w2i:\n w2i[word] = len(w2i)\n instance_word_indices.append(w2i[word])\n\n if self.c_in_dim > 0:\n chars_of_word = [c2i[\"\"]]\n for char in word:\n if char not in c2i:\n c2i[char] = len(c2i)\n chars_of_word.append(c2i[char])\n chars_of_word.append(c2i[\"\"])\n instance_char_indices.append(chars_of_word)\n\n if tag not in tag2idx:\n tag2idx[tag]=len(tag2idx)\n\n instance_tags_indices.append(tag2idx.get(tag))\n\n num_tokens+=1\n\n num_sentences+=1\n\n X.append((instance_word_indices, instance_char_indices)) # list of word indices, for every word list of char indices\n Y.append(instance_tags_indices)\n\n\n print(\"%s sentences %s tokens\" % (num_sentences, num_tokens), file=sys.stderr)\n print(\"%s w features, %s c features \" % (len(w2i),len(c2i)), file=sys.stderr)\n if self.c_in_dim == 0:\n print(\"char features disabled\", file=sys.stderr)\n\n assert(len(X)==len(Y))\n\n # store mappings of words and tags to indices\n self.set_indices(w2i, c2i, tag2idx)\n\n return X, Y\n\n\nclass MyNNTaggerArgumentOptions(object):\n def __init__(self):\n pass\n\n ### functions for checking arguments\n def acfunct(arg):\n \"\"\" check for allowed argument for --ac option \"\"\"\n try:\n functions = [dynet.rectify, dynet.tanh]\n functions = { function.__name__ : function for function in functions}\n functions[\"None\"] = None\n return functions[str(arg)]\n except:\n raise argparse.ArgumentTypeError(\"String {} does not match required format\".format(arg,))\n\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"src/simplebilty.py","file_name":"simplebilty.py","file_ext":"py","file_size_in_byte":21028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"181770186","text":"import tensorflow as tf\n\n#Fetch\ninput1 = tf.constant(3.0)\ninput2 = tf.constant(2.0)\ninput3 = tf.constant(5.0)\n\nadd = tf.add(input2, input3)\nmul = tf.multiply(input1, add)\n\nwith tf.Session() as sess:\n result = sess.run([mul, add])\n print(result)\n\n\n#Feed\nin1 = tf.placeholder(tf.float32)\nin2 = tf.placeholder(tf.float32)\nput = tf.multiply(in1, in2)\n\nwith tf.Session() as sess:\n print(sess.run(put, 
feed_dict={in1:[7.0],in2:[2.0]}))\n","sub_path":"tensorflow-learning/fetch-feed.py","file_name":"fetch-feed.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"456894249","text":"import numpy as np\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers import Dense, Dropout, Input, Lambda,Activation, Concatenate\nfrom keras.layers.merge import Add, Multiply, multiply\nfrom tensorflow.python.client import device_lib\nfrom keras.optimizers import Adam\nimport keras.backend as K\nimport copy\n\nimport tensorflow as tf\n\nimport random\nbufferLength = 5\n\nclass ActorCritic:\n def __init__(self, env, sess, is_endless = False, load_model = False):\n self.env = env\n self.sess = sess\n self.is_endless = is_endless\n self.load_model = load_model\n self.critic_lr = 0.0003\n self.actor_lr = 0.0002\n self.critic_opt = tf.train.GradientDescentOptimizer(0)\n self.actor_opt = tf.train.GradientDescentOptimizer(0)\n self.target_critic_opt = tf.train.GradientDescentOptimizer(0)\n self.target_actor_opt = tf.train.GradientDescentOptimizer(0)\n self.mask_small_value = False\n self.gamma = .9\n self.tau = .125\n self.ita = .125\n self.memorySize = 2000\n self.batchSize = 40\n self.averageR = -0.5\n self.gradient_clip_thres = 2\n self.temp_critic_opt = tf.train.GradientDescentOptimizer(self.critic_lr/self.batchSize)\n self.temp_actor_opt = tf.train.GradientDescentOptimizer(self.actor_lr/self.batchSize)\n self.updateInterval = 40\n self.trainCounter = 0\n self.memory = []\n self.priority = np.array([])\n self.actor_state_input, self.actor_task_input, self.actor_model = self.create_actor_model(self.actor_opt)\n self.temp_actor_state_input, self.temp_actor_task_input, self.temp_actor_model = self.create_actor_model(\n self.temp_actor_opt)\n _, _, self.target_actor_model = self.create_actor_model(self.target_actor_opt)\n #actor_model_weights = self.actor_model.trainable_weights\n\n self.critic_state_input, \\\n self.critic_model = self.create_critic_model(self.critic_opt)\n self.temp_critic_state_input, \\\n self.temp_critic_model = self.create_critic_model(self.temp_critic_opt)\n _, self.target_critic_model = self.create_critic_model(self.target_critic_opt)\n\n self.update_critic = []\n for variable, target in zip(self.critic_model.trainable_weights, self.target_critic_model.trainable_weights):\n self.update_critic.append(tf.assign(target, self.tau * variable + (1 - self.tau) * target ))\n\n self.update_actor = []\n for variable, target in zip(self.actor_model.trainable_weights, self.target_actor_model.trainable_weights):\n self.update_actor.append(tf.assign(target, self.tau * variable + (1 - self.tau) * target ))\n\n self.update_temp_critic = []\n for variable, target in zip(self.critic_model.trainable_weights, self.temp_critic_model.trainable_weights):\n self.update_temp_critic.append(tf.assign(variable, target))\n\n self.update_temp_actor = []\n for variable, target in zip(self.actor_model.trainable_weights, self.temp_actor_model.trainable_weights):\n self.update_temp_actor.append(tf.assign(variable, target))\n\n # Initialize for later gradient calculations\n self.init_gradient()\n if(not self.load_model): self.sess.run(tf.initialize_all_variables())\n\n def create_actor_model(self, opt):\n\n if(not self.load_model):\n state_input = Input(shape=self.env.observation_space.shape)\n task_input = Input(shape=self.env.task_space.shape)\n input = Concatenate()([state_input, task_input])\n h1 = 
Dense(256, activation='relu')(input)\n h2 = Dense(256, activation='relu')(h1)\n output = Dense(self.env.action_space.shape[0], activation='softmax')(h2)\n\n model = Model(input=[state_input, task_input], output=output)\n # adam = Adam(lr=0.001)\n\n model.compile(loss=\"mse\", optimizer=opt)\n return model.input[0], model.input[1], model\n else:\n model = load_model('model_a')\n model.compile(loss=\"mse\", optimizer=opt)\n return model.input[0], model.input[1], model\n\n\n def create_critic_model(self,opt):\n if (not self.load_model):\n state_input = Input(shape=self.env.observation_space.shape)\n state_h1 = Dense(256, activation='relu')(state_input)\n h2 = Dense(256, activation='relu')(state_h1)\n output = Dense(1, activation='linear')(h2)\n model = Model(input=state_input, output=output)\n model.compile(loss=\"mse\", optimizer=opt)\n return state_input, model\n else:\n model = load_model('model_c')\n model.compile(loss=\"mse\", optimizer=opt)\n return model.input, model\n\n def init_gradient(self):\n self.delta = tf.placeholder(tf.float32, None)\n self.action_ind = tf.placeholder(tf.int32, None)\n self.I = tf.placeholder(tf.float32, None)\n self.actor_grad_input = tf.negative( tf.math.log(self.actor_model.output[0, self.action_ind]) * self.I * self.delta )\n self.critic_grad_input = tf.negative(self.critic_model.output * self.I * self.delta)\n self.actor_grads = tf.gradients( self.actor_grad_input,\n self.actor_model.trainable_weights)\n self.critic_grads = tf.gradients(self.critic_grad_input,\n self.critic_model.trainable_weights)\n\n for i, g in enumerate(self.actor_grads):\n if g is not None:\n self.actor_grads[i] = tf.clip_by_norm(g, self.gradient_clip_thres)\n\n for i, g in enumerate(self.critic_grads):\n if g is not None:\n self.critic_grads[i] = tf.clip_by_norm(g, self.gradient_clip_thres)\n\n actor_grads = zip(self.actor_grads, self.temp_actor_model.trainable_weights)\n critic_grads = zip(self.critic_grads, self.temp_critic_model.trainable_weights)\n\n self.train_critic = self.temp_critic_opt.apply_gradients(critic_grads)\n self.train_actor = self.temp_actor_opt.apply_gradients(actor_grads)\n\n def train(self):\n if len(self.memory)self.memorySize ):\n self.memory.pop(0)\n self.priority = np.delete( self.priority, 0 )\n\n # ========================================================================= #\n # Model Predictions #\n # ========================================================================= #\n\n def act(self, cur_state, task):\n '''\n self.epsilon *= self.epsilon_decay\n if np.random.random() < self.epsilon:\n return sample_action()\n '''\n return self.target_actor_model.predict( [cur_state,task] )\n\n\nclass environment:\n def __init__(self):\n global problems\n self.action_space = np.zeros( (2,) )\n self.observation_space = np.zeros( (bufferLength*2,) )\n self.task_space = np.zeros((2,))\n\ndef sample_action():\n #ind = np.random.randint(len(problem_list) + len(concept_list))\n res = np.ones( (2,1) ).T\n res/=(res.size)\n return res\n\ndef normalize_list(action):\n #print(action.sum(), 'sum of action prob')\n if (action.sum() == 0):\n print('error!')\n action = action/action.sum()\n\n return action\n\ndef action_to_readable(action):\n #coordinate = np.argmax(action)\n #action = normalize_list(action)\n coordinate = np.random.choice(len(action[0]), 1, p=action[0])[0]\n ind = coordinate\n #ind = np.argmax(action)\n return ind\n\n\n#convert the observation to matrix used by NN training\ndef initialize_state():\n\n return [[],[]]\n\ndef state_to_input(state):\n res = 
copy.deepcopy(state)\n for i in range( len(res) ):\n for j in range( len(res[i])):\n res[i][j] += 1\n while( len(res[i])0 ):\n if(state[0][0] == A):\n if(curP < p0a):\n state[0].pop(0)\n else:\n if(curP < p0b):\n state[0].pop(0)\n if (len(state[1]) > 0):\n if (state[1][0] == A):\n if (curP < p1a):\n state[1].pop(0)\n else:\n if (curP < p1b):\n state[1].pop(0)\n\ndef update_state_by_action(state, action, task):\n if( len(state[action])>=bufferLength ): return 0\n else:\n state[action].append(task)\n return 1\n\ndef task_to_matrix(task):\n res = [0,0]\n res[task] = 1\n return np.array([res])\n\ndef main_endless():\n f = open('res.txt', 'w')\n f.close()\n\n sess = tf.Session() # config=tf.ConfigProto(log_device_placement=True))\n # print(device_lib.list_local_devices())\n K.set_session(sess)\n env = environment()\n actor_critic = ActorCritic(env, sess)\n holdQueue = []\n for round in range(20000):\n print(round)\n total_t = 0\n total_correct = 0\n total_incorrect = 0\n # random.seed()\n\n # random.shuffle(users)\n cur_state = initialize_state()\n I = 1.0\n total_waiting_time = 0\n ind = 0\n cur_state = initialize_state()\n I = 1.0\n total_reward = 0\n for task_ind in range(2):\n new_task = random.randrange(2)\n # new_task = task_to_matrix(new_task)\n action = actor_critic.act(state_to_input(cur_state), task_to_matrix(new_task))\n # print(action)\n action_translated = action_to_readable(action)\n\n # print('gaga')\n # action_translated = action_to_readable(action)\n new_state = copy.deepcopy(cur_state)\n reward = update_state_by_action(new_state, action_translated, new_task)\n if (task_ind == 1): working(new_state)\n actor_critic.remember(state_to_input(cur_state), action_translated, reward, \\\n state_to_input(new_state), False, I, task_to_matrix(new_task))\n actor_critic.train()\n cur_state = new_state\n f = open('res.txt', 'a+')\n print('reward: ', reward)\n f.write(str(reward))\n f.write('\\n')\n f.close()\n\n #actor_critic.critic_model.save('model_c')\n #actor_critic.actor_model.save('model_a')\n\ndef main():\n f = open('res.txt','w')\n f.close()\n\n sess = tf.Session()#config=tf.ConfigProto(log_device_placement=True))\n #print(device_lib.list_local_devices())\n K.set_session(sess)\n env = environment()\n actor_critic = ActorCritic(env, sess)\n holdQueue = []\n for round in range(40000):\n if( (round+1)%500 == 0):\n actor_critic.critic_model.save('model_c')\n actor_critic.actor_model.save('model_a')\n print(round)\n total_t = 0\n total_correct = 0\n total_incorrect = 0\n #random.seed()\n\n #random.shuffle(users)\n cur_state = initialize_state()\n I = 1.0\n total_waiting_time = 0\n ind = 0\n cur_state = initialize_state()\n I = 1.0\n total_reward = 0\n for ind in range(20):\n\n for task_ind in range(2):\n #if (random.randrange(10) > 8): continue\n new_task = random.randrange(2)\n #new_task = task_to_matrix(new_task)\n action = actor_critic.act(state_to_input(cur_state), task_to_matrix(new_task))\n # print(action)\n action_translated = action_to_readable(action)\n\n # print('gaga')\n # action_translated = action_to_readable(action)\n new_state = copy.deepcopy(cur_state)\n reward = update_state_by_action(new_state, action_translated, new_task)\n if(task_ind== 1): working(new_state)\n actor_critic.remember(state_to_input(cur_state), action_translated, reward, \\\n state_to_input(new_state), reward==0, I, task_to_matrix(new_task))\n actor_critic.train()\n cur_state = new_state\n total_reward += reward\n I = I * actor_critic.gamma\n if(reward == 0): break\n #print(cur_state)\n if (reward == 0): 
break\n f = open('res.txt','a+')\n print('total reward: ', total_reward)\n f.write(str(total_reward))\n f.write('\\n')\n f.close()\n\n\n\nif __name__ == \"__main__\":\n main()\n #printStates()","sub_path":"AC_with_prioritize.py","file_name":"AC_with_prioritize.py","file_ext":"py","file_size_in_byte":15585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"636170103","text":"import ConfigParser\r\nimport re,datetime\r\n\r\ncp = ConfigParser.ConfigParser()\r\ncp.read(\"blacklist_match.conf\")\r\nsection = cp.sections()\r\n# print section\r\ndef get_func():\r\n parse_blacklist_key = cp.options(\"parse_blacklist\")\r\n #function module\r\n moudle_func = cp.get(\"parse_blacklist\", parse_blacklist_key[0])\r\n moudle_list = moudle_func.split(',')\r\n # print moudle_list\r\n moudle_name = {}\r\n for temp in moudle_list:\r\n fname,ftimes=temp.split(\":\")\r\n fname = fname.strip()\r\n # as a dict: key is filename,value is the update frequency\r\n moudle_name[fname]=ftimes\r\n return moudle_name\r\n\r\ndef get_store_path():\r\n #source_data_path\r\n source_store_path_key = cp.options('source_store_path')\r\n source_store_path = []\r\n for temp in source_store_path_key:\r\n source_store_path.append(cp.get('source_store_path', temp))\r\n return source_store_path\r\n\r\n\r\ndef get_ES_info():\r\n # ES information\r\n source_store_path_key=cp.options(\"ES_info\")\r\n #value=cp.get(sectionName,keyword)\r\n server=cp.get('ES_info',source_store_path_key[0])\r\n dport=cp.get('ES_info',source_store_path_key[1])\r\n indx=cp.get('ES_info',source_store_path_key[2])\r\n aggs_name=cp.get('ES_info',source_store_path_key[3])\r\n return server,dport,indx,aggs_name\r\n\r\ndef getCheckDeltatime():\r\n #check frequency\r\n timekey1=cp.options(\"delta_time_check\")\r\n times=cp.getint(\"delta_time_check\",timekey1[0])\r\n deltatime=datetime.timedelta(minutes=times)\r\n timekey2=cp.options(\"frequency\")\r\n starttime=cp.get(\"frequency\",timekey2[0])\r\n return deltatime,starttime\r\n\r\ndef get_module_path():\r\n #source_data_path\r\n module_path_key = cp.options('blacklist_moudle_path')\r\n # print module_path_key\r\n module_path = []\r\n for temp in module_path_key:\r\n module_path.append(cp.get('blacklist_moudle_path', temp))\r\n return module_path\r\n\r\ndef get_method():\r\n # get subnet method\r\n source_store_path_key = cp.options(\"subnet_methods\")\r\n # value=cp.get(sectionName,keyword)\r\n flg_lpm = cp.getint('subnet_methods', source_store_path_key[0])\r\n flg_full = cp.getint('subnet_methods', source_store_path_key[1])\r\n return flg_lpm,flg_full\r\n# print cp.sections\r\n#cun period\r\n#############################################################################################################################\r\n# frequency_key = cp.options(sections[3])\r\n# frequency = []\r\n# for temp in frequency_key:\r\n# frequency.append(cp.get('frequency', temp))\r\n#\r\n# # print frequency\r\n# regex1=re.compile(r'\\d+')\r\n# regex2=re.compile(r'[a-zA-Z]+')\r\n# period_num = regex1.findall(frequency[1])[0]\r\n# period_scale = regex2.findall(frequency[1])[0]\r\n# def export_period():\r\n# if period_scale == 's'or period_scale == 'S' :\r\n# period = datetime.timedelta(seconds = int(period_num))\r\n# elif period_scale == 'm'or period_scale == 'M':\r\n# period = datetime.timedelta(minutes = int(period_num))\r\n# elif period_scale == 'd' or period_scale == 'D':\r\n# period = datetime.timedelta(days = int(period_num))\r\n# return 
period\r\n#############################################################################################################################","sub_path":"project/parser_config.py","file_name":"parser_config.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"467448929","text":"import csv, math, ast \nimport numpy as np\nimport matplotlib.pylab as plt\n\ndef poisson(actual, mean):\n return math.pow(mean, actual) * math.exp(-mean) / math.factorial(actual)\n\ncsvFile = '20152016.csv'\ntau = 1.0\nteam_list = []\ntvsum = []\n\nk = open('team_list.txt', 'w')\nk.write(\"\"\"{\n\"\"\")\n\ncsvRead = csv.reader(open(csvFile))\nnext(csvRead)\n\nfor row in csvRead:\n\tif row[2] not in team_list:\n\t\tteam_list.append(row[2])\n\tif row[3] not in team_list:\n\t\tteam_list.append(row[3])\n\nteam_list.sort()\n\nfor team in team_list:\n\tk.write(\"\"\"\t'%s': {'home_goals': 0, 'away_goals': 0, 'home_conceded': 0, 'away_conceded': 0, 'home_games': 0, 'away_games': 0, 'alpha_h': 0, 'beta_h': 0, 'alpha_a': 0, 'beta_a': 0},\n\"\"\" % (team))\n\nk.write(\"}\")\nk.close()\n\ns = open('team_list.txt', 'r').read()\ndict = ast.literal_eval(s)\n\nGAMES_PLAYED = 0\nWEEKS_WAIT = 4\nTOTAL_VALUE = 0\n\ncsvRead = csv.reader(open(csvFile))\nnext(csvRead)\n\nfor game in csvRead:\n\thome_team = game[2]\n\taway_team = game[3]\n\n\thome_goals = int(game[4])\n\taway_goals = int(game[5])\n\n\thome_win_prob = 0\n\tdraw_win_prob = 0\n\taway_win_prob = 0\n\t\n\tcurr_home_goals = 0\n\tcurr_away_goals = 0\n\tavg_home_goals = 1\n\tavg_away_goals = 1\n\t\n\tteam_bet = ''\n\tev_bet = ''\n\t\n\t# GETTING UPDATED VARIABLES\n\tfor key, value in dict.items():\n\t\tcurr_home_goals += dict[key]['home_goals']\n\t\tcurr_away_goals += dict[key]['away_goals']\n\t\t\n\t\tif GAMES_PLAYED > (WEEKS_WAIT * 10):\n\t\t\tavg_home_goals = curr_home_goals / (GAMES_PLAYED)\n\t\t\tavg_away_goals = curr_away_goals / (GAMES_PLAYED)\n\t\n\t\n\t# CALCULATING FACTORS\n\tif GAMES_PLAYED > (WEEKS_WAIT * 10):\n\t\thome_team_a = (dict[home_team]['alpha_h'] + dict[home_team]['alpha_a']) / 2\n\t\taway_team_a = (dict[away_team]['alpha_h'] + dict[away_team]['alpha_a']) / 2\n\t\t\n\t\thome_team_d = (dict[home_team]['beta_h'] + dict[home_team]['beta_a']) / 2\n\t\taway_team_d = (dict[away_team]['beta_h'] + dict[away_team]['beta_a']) / 2\n\t\t\n\t\thome_team_exp = avg_home_goals * home_team_a * away_team_d\n\t\taway_team_exp = avg_away_goals * away_team_a * home_team_d\n\t\n\t\n\t# RUNNING POISSON\t\n\t\tl = open('poisson.txt', 'w')\n\t\t\n\t\tfor i in range(10):\n\t\t\tfor j in range(10):\n\t\t\t\tprob = tau * poisson(i, home_team_exp) * poisson(j, away_team_exp)\n\t\t\t\tl.write(\"Prob%s%s = %s\\n\" % (i, j, prob))\n\t\t\n\t\tl.close()\n\t\t\n\t\twith open('poisson.txt') as f:\n\t\t\tprobsum = 0.0\n\t\t\tfor line in f:\n\t\t\t\t\n\t\t\t\thome_goals_m = int(line.split(' = ')[0][4])\n\t\t\t\taway_goals_m = int(line.split(' = ')[0][5])\n\t\t\t\t\n\t\t\t\tprob = float(line.split(' = ')[1])\n\t\t\t\tprobsum = probsum + prob\n\t\t\t\tif home_goals_m > away_goals_m:\n\t\t\t\t\thome_win_prob += prob\n\t\t\t\telif home_goals_m == away_goals_m:\n\t\t\t\t\tdraw_win_prob += prob\n\t\t\t\telif home_goals_m < away_goals_m:\n\t\t\t\t\taway_win_prob += prob\n\n\t#CALCULATE VALUE\n\t\tbet365odds_h, bet365odds_d, bet365odds_a = float(game[23]), float(game[24]), float(game[25])\n\t\t\n\t\tev_h = (home_win_prob/probsum * (bet365odds_h - 1)) - (1 - home_win_prob/probsum)\n\t\tev_d = 
(draw_win_prob/probsum * (bet365odds_d - 1)) - (1 - draw_win_prob/probsum)\n\t\tev_a = (away_win_prob/probsum * (bet365odds_a - 1)) - (1 - away_win_prob/probsum)\n\t\t\n\t\thighestEV = max(ev_h, ev_d, ev_a)\n\t\t\n\t\tif (ev_h == highestEV) and (ev_h > 0):\n\t\t\tteam_bet = home_team\n\t\t\tev_bet = ev_h\n\t\t\tif home_goals > away_goals:\n\t\t\t\tTOTAL_VALUE += (bet365odds_h - 1)\n\t\t\telse:\n\t\t\t\tTOTAL_VALUE -= 1\n\t\t\t\t\n\t\telif (ev_d == highestEV) and (ev_d > 0):\n\t\t\tteam_bet = 'Draw'\n\t\t\tev_bet = ev_d\n\t\t\tif home_goals == away_goals:\n\t\t\t\tTOTAL_VALUE += (bet365odds_d - 1)\n\t\t\telse:\n\t\t\t\tTOTAL_VALUE -= 1\n\t\telif (ev_a == highestEV) and (ev_a > 0):\n\t\t\tteam_bet = away_team\n\t\t\tev_bet = ev_a\n\t\t\tif home_goals < away_goals:\n\t\t\t\tTOTAL_VALUE += (bet365odds_a - 1)\n\t\t\telse:\n\t\t\t\tTOTAL_VALUE -= 1\n\t\t\n\t\t\n\t\ttvsum.append(TOTAL_VALUE)\n\t\tif (team_bet != '') and (ev_bet != ''):\n\t\t\tprint (\"Bet on '%s' (EV = %s)\" % (team_bet, ev_bet))\t\n\t\t\tprint (TOTAL_VALUE)\n\t\t\n\t# UPDATE VARIABLES AFTER MATCH HAS BEEN PLAYED\n\tdict[home_team]['home_goals'] += home_goals\n\tdict[home_team]['home_conceded'] += away_goals\n\tdict[home_team]['home_games'] += 1\n\t\n\tdict[away_team]['away_goals'] += away_goals\n\tdict[away_team]['away_conceded'] += home_goals\n\tdict[away_team]['away_games'] += 1\n\t\n\tGAMES_PLAYED += 1\n\t\n\t# CREATE FACTORS\n\tif GAMES_PLAYED > (WEEKS_WAIT * 10):\n\t\tfor key, value in dict.items():\n\t\t\talpha_h = (dict[key]['home_goals'] / dict[key]['home_games']) / avg_home_goals\n\t\t\tbeta_h = (dict[key]['home_conceded'] / dict[key]['home_games']) / avg_away_goals\n\n\t\t\talpha_a = (dict[key]['away_goals'] / dict[key]['away_games']) / avg_away_goals\n\t\t\tbeta_a = (dict[key]['away_conceded'] / dict[key]['away_games']) / avg_home_goals\n\n\t\t\tdict[key]['alpha_h'] = alpha_h\n\t\t\tdict[key]['beta_h'] = beta_h\n\t\t\tdict[key]['alpha_a'] = alpha_a\n\t\t\tdict[key]['beta_a'] = beta_a\n\t\t\t\n\t\t\t\n\t\t\t\n#plot results\nfig = plt.figure()\nax1 = fig.add_subplot(111)\nax1.plot(tvsum)\nxl = list(ax1.get_xlim())\nax1.plot(xl,[0,0],ls='--',color='k')\nplt.savefig('fig_profit.png')\t\t\t\n\t\t\t","sub_path":"football_project/code_dir/main_code_poison.py","file_name":"main_code_poison.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361011209","text":"import csv\r\nimport hashlib\r\nfrom datetime import datetime, timedelta\r\n\r\ndef get_cheaters():\r\n dictionary = [\r\n {\"user_id\":\"34\",\"evidencias\":[{\"challenge_id\":\"2\", \"date\":\"28/03/2019 11:10:19\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:30\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:42\"}],\"tramposo\":True},\r\n {\"user_id\":\"35\",\"evidencias\":[{\"challenge_id\":\"3\", \"date\":\"28/03/2019 11:10:19\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:30\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:42\"}],\"tramposo\":True},\r\n {\"user_id\":\"36\",\"evidencias\":[{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:19\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:30\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:42\"}],\"tramposo\":True},\r\n {\"user_id\":\"37\",\"evidencias\":[{\"challenge_id\":\"5\", \"date\":\"28/03/2019 11:10:19\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:30\"},{\"challenge_id\":\"4\", \"date\":\"28/03/2019 11:10:42\"}],\"tramposo\":True},\r\n ]\r\n\r\n return 
dictionary\r\n\r\ndef hashit(txt):\r\n    return hashlib.md5(txt.encode('utf-8')).hexdigest()\r\n\r\ndef get_challenges_id_name_value():\r\n    input_file = csv.DictReader(open(\"challenges.csv\"))\r\n    dictionary = {}\r\n    for row in input_file:\r\n        #print(row[\"id\"],row[\"value\"])\r\n        dictionary.update({row[\"id\"]:[{\"name\":row[\"name\"],\"value\":row[\"value\"]}]})\r\n\r\n    return dictionary\r\n\r\n\r\nchallenges_idvalue = get_challenges_id_name_value()\r\n\r\ndef get_cheater_users():\r\n    input_file_1 = csv.DictReader(open(\"solves.csv\"))\r\n    input_file_2 = csv.DictReader(open(\"solves.csv\"))\r\n    dictionary = {}\r\n    result = []\r\n    tmp=[]\r\n    listbadguys=[\"36\",\"48\"]\r\n    for row in input_file_1:\r\n        #print(row[\"id\"],row[\"name\"])\r\n        #if row[\"id\"] not in listbadguys:\r\n        #    continue\r\n\r\n\r\n        if row[\"user_id\"] not in tmp:\r\n            dictionary.update({row[\"user_id\"]:[]})\r\n            result.append({\"user_id\":row[\"user_id\"],\"evidencias\":[]})\r\n            tmp.append(row[\"user_id\"])\r\n\r\n\r\n\r\n    bypass=False\r\n\r\n    for row in input_file_2:\r\n        for user_id, info in dictionary.items():\r\n            if row[\"user_id\"] == user_id:\r\n\r\n                if(int(challenges_idvalue[row[\"challenge_id\"]][0][\"value\"])>100 or bypass):\r\n                    dictionary[user_id].append({\"challenge_id\":row[\"challenge_id\"],\"date\":row[\"date\"].format(datetime.now())})\r\n\r\n\r\n\r\n\r\n    for user_id, info in dictionary.items():\r\n        evidencias = []\r\n        anterior=None\r\n        for x in info:\r\n            FMT = '%H:%M:%S'\r\n            tdelta=\"\"\r\n            if anterior is not None:\r\n                actual=x[\"date\"].split()[1]\r\n                s = datetime.strptime(actual, FMT) - datetime.strptime(anterior, FMT)\r\n                tdelta = str(timedelta(seconds=s.seconds))\r\n                if s < timedelta(minutes=3) or bypass: \r\n                    users_idname = get_users_id_and_name()\r\n                    info_users = users_idname[user_id]\r\n                    for info_us in info_users:\r\n                        user_name=info_us[\"name\"]\r\n\r\n                    print(\"user_name: \"+user_name+\", difference between one challenge and the next: not more than 3 minutes, it is: \"+tdelta+\" minutes\")\r\n\r\n                    anterior_1.update({\"siguiente\":x,\"diferencia_min\":tdelta})\r\n                    evidencias.append(anterior_1)\r\n                    #evidencias.append(tdelta)\r\n                    #evidencias.append(x)\r\n            anterior = x[\"date\"].split()[1]\r\n            anterior_1 = x\r\n        i=0\r\n        for us in result:\r\n\r\n            if us[\"user_id\"] == user_id:\r\n                if len(evidencias) == 0:\r\n                    result.pop(i)\r\n                else:\r\n                    if len(evidencias) > 2:\r\n                        result[i].update({\"user_id\":user_id,\"evidencias\":evidencias})\r\n            i+=1\r\n\r\n    i=0\r\n    for x in result:\r\n        if len(x[\"evidencias\"])==0:\r\n            result.pop(i)\r\n        i+=1\r\n\r\n\r\n    return result\r\n\r\n#get_cheater_users()\r\n\r\ndef get_users_id_and_name():\r\n    input_file = csv.DictReader(open(\"users.csv\"))\r\n    dictionary = {}\r\n    for row in input_file:\r\n        #print(row[\"id\"],row[\"name\"])\r\n        dictionary.update({row[\"id\"]:[{\"name\":row[\"name\"],\"team_id\":row[\"team_id\"]}]})\r\n\r\n    return dictionary\r\n\r\n\r\n\r\ndef get_teams_id_and_name():\r\n    input_file = csv.DictReader(open(\"teams.csv\"))\r\n    dictionary = {}\r\n    for row in input_file:\r\n        #print(row[\"id\"],row[\"value\"])\r\n        dictionary.update({row[\"id\"]:row[\"name\"]})\r\n\r\n    return dictionary\r\n\r\ndef get_solves_teamid_challengeid():\r\n    input_file = csv.DictReader(open(\"solves.csv\"))\r\n    input_file2 = csv.DictReader(open(\"solves.csv\"))\r\n    dictionary = {}\r\n    for row in input_file:\r\n        dictionary.update({row[\"team_id\"]:[]})\r\n\r\n    for row in input_file2:\r\n        dictionary[row[\"team_id\"]].append(row[\"challenge_id\"])\r\n\r\n    return dictionary\r\n\r\n\r\n\r\n\r\nusers_idname = get_users_id_and_name()\r\ncheaters = get_cheater_users()\r\n\r\nfinaldict = []\r\n\r\nfor ch in cheaters:\r\n    info_users = users_idname[ch[\"user_id\"]]\r\n    for info_us in info_users:\r\n        user_name=info_us[\"name\"]\r\n        user_id=ch[\"user_id\"]\r\n        user_team=info_us[\"team_id\"]\r\n        print(hashit(user_name))\r\n        user_score=0\r\n        for evi in ch[\"evidencias\"]:\r\n            challenge_id = evi[\"challenge_id\"]\r\n            challenge_info=challenges_idvalue[challenge_id]\r\n\r\n            for info_ch in challenge_info:\r\n                challenge_name=info_ch[\"name\"]\r\n                challenge_value=info_ch[\"value\"]\r\n                print(\"\\t\"+challenge_name+\" with a value of: \"+challenge_value+\", solve time: \"+evi[\"date\"]+\", next challenge completed: \"+evi[\"siguiente\"][\"date\"])\r\n                user_score+=int(challenge_value)\r\n            #print(user_score)\r\n        #if user_id not in finaldict:\r\n        finaldict.append({user_id:{\"user_id\":user_id,\"user_name\":user_name, \"user_score\":user_score,\"team\":user_team}})\r\n\r\nteams_idname = get_teams_id_and_name()\r\nprint(\"-------------------\")\r\npuntostrampososporequipo={}\r\nfor user in finaldict:\r\n    print(user)\r\n    for id,info in user.items():\r\n        score=info[\"user_score\"]\r\n        if any(info[\"team\"] in d for d in puntostrampososporequipo):\r\n            score+= int(puntostrampososporequipo[info[\"team\"]])\r\n        puntostrampososporequipo.update({info[\"team\"]:score})\r\n\r\nfor equipos,puntostramposos in puntostrampososporequipo.items():\r\n    print(\"we need to subtract \"+str(puntostramposos)+\" from team \"+hashit(teams_idname[equipos]))\r\n\r\n\r\nprint(\"-------------------\")\r\n\r\nteams_and_its_challenges = get_solves_teamid_challengeid()\r\ntotalperteam={}\r\nfor key, value in teams_and_its_challenges.items():\r\n    score = 0\r\n    for ch_id in value:\r\n        for ch_info in challenges_idvalue[ch_id]:\r\n            score+=int(ch_info[\"value\"])\r\n    totalperteam.update({key:score})\r\n\r\n\r\n\r\n\r\npuntossintrampasporequipo = {}\r\nfor equipo_1,puntostotales in totalperteam.items():\r\n    print(\"team: \"+hashit(teams_idname[equipo_1])+\" before removing cheater points has: \"+str(puntostotales))\r\n    for equipo_2, puntostramposos in puntostrampososporequipo.items():\r\n        if equipo_2 == equipo_1:\r\n            totalperteam.update({equipo_1:puntostotales-puntostramposos})\r\nprint(\"-------------------\")\r\n\r\nfor equipo, puntos in totalperteam.items():\r\n    print(\"team: \"+hashit(teams_idname[equipo])+\" after removing cheater points has: \"+str(puntos))\r\n","sub_path":"ctf-cheater-detector.py","file_name":"ctf-cheater-detector.py","file_ext":"py","file_size_in_byte":7250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"278907032","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n\nimport sys\nimport os\nimport re\nimport math\nimport collections\n\n# the current python file location\npPath = os.path.split(os.path.realpath(__file__))[0]\nsys.path.append(pPath)\n\nUSAGE = \"\"\"\nUSAGE:\n    python CTDDClass.py input.fasta output amino_acids_group_1 amino_acids_group_2 ... amino_acids_group_N\n\n    input.fasta: the input protein sequence file in fasta format. 
\n output: the encoding file.\n amino_acids_group_x the amino acids groups.\n\nEXAMPLE:\n python CTDDClass.py example/test-protein.txt CTDDClass.tsv RKEDQN GASTPHY CLVIMFW\n\"\"\"\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n\n\n# merged from readFasta.py\ndef readFasta(file):\n if not os.path.exists(file):\n print('Error: \"' + file + '\" does not exist.')\n sys.exit(1)\n\n with open(file) as f:\n records = f.read()\n\n if re.search('>', records) is None:\n print('The input file seems not in fasta format.')\n sys.exit(1)\n\n records = records.split('>')[1:]\n fastas = []\n for fasta in records:\n array = fasta.split('\\n')\n name, sequence = array[0].split()[0], re.sub('[^ARNDCQEGHILKMFPSTWYV-]', '', ''.join(array[1:]).upper())\n fastas.append([name, sequence])\n return fastas\n\n\n# merged from saveCode.py\ndef savetsv(encodings, file='encoding.tsv'):\n lengthOfEncodings = len(encodings)\n with open(file, 'w') as f:\n if lengthOfEncodings == 0:\n f.write('Descriptor calculation failed.')\n else:\n for i in range(len(encodings[0]) - 1):\n f.write(encodings[0][i] + '\\t')\n f.write(encodings[0][-1] + '\\n')\n for encoding in encodings[1:]:\n for j in range(0, len(encoding) - 1):\n f.write(str(encoding[j]) + '\\t')\n f.write(str(encoding[len(encoding)-1]) + '\\n')\n return None\n\n# -------------------------------------------------------------------------------------------------------------------------------------\n\n\ndef count(group, sequence):\n number = 0\n lengthOfSequence = len(sequence)\n\n letterFrequency = collections.Counter(sequence)\n for letter, frequency in letterFrequency.items():\n if letter in group:\n number = number + frequency\n\n cutoffNums = [1, math.floor(0.25 * number), math.floor(0.50 * number), math.floor(0.75 * number), number]\n cutoffNums = [num if num >= 1 else 1 for num in cutoffNums]\n\n code = []\n for num in cutoffNums:\n count = 0\n for i in range(lengthOfSequence):\n if sequence[i] in group:\n count += 1\n if count == num:\n code.append((i + 1) / lengthOfSequence * 100)\n break\n if count == 0:\n code.append(0)\n return code\n\n\ndef ctdd(fastas, groups):\n encodings = []\n header = ['#']\n\n for g in range(len(groups)):\n for d in ['0', '25', '50', '75', '100']:\n header.append('Group.' 
+ str(g+1) + '.residue' + d)\n encodings.append(header)\n\n for fasta in fastas:\n name, sequence = fasta[0], fasta[1]\n code = [name]\n for group in groups:\n code = code + count(group, sequence)\n encodings.append(code)\n\n return encodings\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 5:\n print(USAGE)\n sys.exit(1)\n\n groups = sys.argv[3:]\n\n groupsStr = ''.join(groups)\n groupsStr = re.sub('[^ACDEFGHIKLMNPQRSTVWY]', '', groupsStr)\n if len(groupsStr) != 20 or len(set(groupsStr)) != 20:\n print('\\nERROR: The amino acid must be no-repeat in each groups and the sum is 20!\\n')\n\n fastas = readFasta(sys.argv[1])\n if (0 == len(fastas)):\n print(\"The content of input file is empty!\\n\")\n encodings = ctdd(fastas, groups)\n savetsv(encodings, sys.argv[2])\n","sub_path":"CTDDClass.py","file_name":"CTDDClass.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"326228159","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport time\n\nimport altair as alt # out of the box\nimport matplotlib.pyplot as plt # out of the box\nimport plotly.figure_factory as ff # requires scipy\nimport graphviz\nfrom PIL import Image\n\nimport datetime \n\n# Adding a title\nst.title('A very basic streamlit app \\n')\n\n# markdown magic\n\"\"\"\nA lightweight and fast framework for interactive visualization:\n* Showcase the results of **ML models**\n* Write clean code without _callbacks_\n\n```bash\nstreamlit run my_app.py\n```\n\nThe markdown also supports LaTeX (more exactly KaTeX):\n\n$$y_i \\sim N(\\mu_i, \\sigma^2)$$\n\"\"\"\n\nst.latex(r'''\n a + ar + a r^2 + a r^3 + \\cdots + a r^{n-1} =\n \\sum_{k=0}^{n-1} ar^k =\n a \\left(\\frac{1-r^{n}}{1-r}\\right)\n ''')\n\nst.sidebar.markdown(\n \"\"\"\n ### Exploring the features\n * UI elements\n * Caching and updating\n \"\"\"\n)\n\n# Explore UI elements\nst.header(\"The standard API\")\nst.subheader(\"Exploring basic UI elements\")\nwith st.echo():\n # show some code in the app\n def hello_function(my_name):\n return \"Hello \" + my_name + \"!\"\n\nst.code(\"\"\"pip list | grep pandas\"\"\", language=\"bash\")\nst.json({\n 'amid': 'PCT9991',\n 'number_comparables': 10,\n 'parameters': [\n 'color', 'padding', 'shape'\n ]\n})\n\n# a button\nif st.button('Say hello'):\n st.write(\"Hello there\")\nelse:\n st.write(\"Goodbye\")\n\n\n# radioboxes\ngenre = st.radio(\n \"What's your favorite movie genre?\",\n ('Comedy', 'Drama', 'Documentary'), \n index=1\n)\n\nif genre == \"Comedy\":\n st.write(\"You should watch Dumb and the Dumber\")\nelse:\n st.write(\"You should watch Twelve Angry Men\")\n\n\noption = st.selectbox(\n \"How would you like to be contacted?\",\n (\"Email\", \"Home Phone\", \"Mobile Phone\")\n)\n\nst.write(\"You selected: \", option)\n\n\noptions = st.multiselect(\n \"What are your favorite colors\",\n ('Green', \"Yellow\", \"Red\", \"Blue\"),\n ('Yellow', 'Red')\n)\nst.write(\"You selected:\", options)\n\n\nd = st.date_input(\n 'When is your birthday',\n datetime.date(2019, 7, 6))\nst.write('Your birthday is:', d)\n\n# t = st.time_input('Set an alarm for', datetime.time(8, 45))\n# st.write('Alarm is set for', t)\n\n\n# widgets are treated as variables\nst.subheader(\"A slider example\")\nx = st.slider('Select a value', min_value=1, max_value=100, value=12)\nst.write(x, 'squared is', x ** 2) # can pass anything into it\n\n\n# listing a pandas.DataFrame\nst.subheader(\"A dummy table updated by the slider\")\ndf_dummy = 
pd.DataFrame({\n 'first column': [1, 2, 3, 4],\n 'second column': [elem * x for elem in [10, 10, 30, 40]]\n})\n\n# st.write(df_dummy)\n# st.dataframe(df_dummy)\nst.table(df_dummy)\noption = st.sidebar.selectbox(\n \"Which value of the first column do you want to select?\",\n df_dummy['first column']\n)\n\nst.sidebar.markdown('You selected the option nr. ' + str(option))\n\n\n# load some data into memory\nst.subheader(\"Loading data with caching\")\nread_and_cache_csv = st.cache(pd.read_csv)\n\nBUCKET = \"https://streamlit-self-driving.s3-us-west-2.amazonaws.com/\"\ndata = read_and_cache_csv(BUCKET + \"labels.csv.gz\", nrows=1000)\ndesired_label = st.selectbox('Filter to:', ['car', 'truck'])\nst.write(data[data.label == desired_label])\n\n\n# drawing basic charts\nst.header(\"Data Visualization\")\nst.subheader(\"Some basic charts/plots\")\n\nchart_data = pd.DataFrame(\n np.random.randn(100, 3), \n columns=['a', 'b', 'c']\n)\nst.line_chart(chart_data) # uses Vega by default\n\nif st.checkbox('Show bar and area plots', False):\n # area and bar charts\n chart_data_low = pd.DataFrame(\n np.random.randn(20, 3),\n columns=['a', 'b', 'c']\n )\n st.area_chart(chart_data_low)\n st.bar_chart(chart_data_low)\n\n # matplotlib chart\n _ = plt.hist(chart_data['a'], bins=30)\n _ = plt.title(\"Distribution of X\")\n st.pyplot()\n\n# drawing a scatterplot\ndf_normal = pd.DataFrame(\n np.random.randn(200, 3),\n columns=['a', 'b', 'c']\n)\n\n# altair charts\nc = alt.Chart(df_normal).mark_circle().encode(\n x='a', y='b', size='c', color='c'\n)\nst.write(c)\n\n# vega charts\nvega_check = st.checkbox(\"Same, but with VegaLite\", False)\nif vega_check:\n st.vega_lite_chart(df_normal, {\n 'mark': 'circle',\n 'encoding': {\n 'x': {'field': 'a', 'type': 'quantitative'},\n 'y': {'field': 'b', 'type': 'quantitative'},\n 'size': {'field': 'c', 'type': 'quantitative'},\n 'color': {'field': 'c', 'type': 'quantitative'}\n }\n })\n\n# drawing a map\nmap_check = st.checkbox(\"Show map\", False)\nif map_check:\n map_data = pd.DataFrame(\n np.random.randn(1000, 2) / [50, 50] + [37.76, -122.4],\n columns=['lat', 'lon'])\n # st.map(map_data)\n\n # a better map with perspective\n st.deck_gl_chart(\n viewport={\n 'latitude': 37.76,\n 'longitude': -122.4,\n 'zoom': 11,\n 'pitch': 50,\n },\n layers=[{\n 'data': map_data,\n 'type': 'ScatterplotLayer'\n }]\n )\n\n# st.subheader(\"Simulate a long running process\")\n\n# latest_iteration = st.empty()\n# bar = st.progress(0)\n\n# for i in range(100):\n# latest_iteration.text(f'Iteration {i + 1}')\n# bar.progress(i + 1)\n# time.sleep(0.1)\n# st.write('we are done')\n\nx1 = np.random.randn(200) - 3\nx2 = np.random.randn(200)\nx3 = np.random.randn(200) + 2\n\nhist_data = [x1, x2, x3]\ngroup_labels = [\"Group 1\", \"Group 2\", \"Group 3\"]\n\nfig = ff.create_distplot(\n hist_data, group_labels, bin_size=[.1, .25, .5]\n)\nst.plotly_chart(fig)\n\n\n# plotting graphs\nst.graphviz_chart('''\n digraph {\n run -> intr\n intr -> runbl\n runbl -> run\n run -> kernel\n kernel -> zombie\n kernel -> sleep\n kernel -> runmem\n sleep -> swap\n swap -> runswap\n runswap -> new\n runswap -> runmem\n new -> runmem\n sleep -> runmem\n }\n''')\n\n\n# image = Image.open(\"my_image.png\")\n# st.image(image, caption='Sunrise by the mountains', use_column_width=True)","sub_path":"streamlit/hello_world/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"279616990","text":"from 
....templates.contents import ContentsResponseTemplate, ContentsListingTemplate\nfrom ....GLOBAL.logging import logger\nfrom ....GLOBAL.functions import get_soup_attribute\nfrom ....GLOBAL.decorators import to_list, wrap_sort, cast_return\nfrom ....identifiers.contents import ScanningAndPublicationStatus\nfrom ....exceptions.responses import ResponseParsingFailedException\nfrom datetime import datetime\nimport re\n\nfrom bs4.element import NavigableString, Tag\n\nclass JaiminIsBoxContentsListing(ContentsListingTemplate):\n @classmethod\n @to_list\n def from_soup(cls, soup):\n for element in soup.find('div', {'class':'group'}).find_all('div', {'class':'element'}):\n date_string = element.find('div', {'class':'meta_r'}).text.split()[-1]\n \n link = element.find('div', {'class':'title'}).find('a') # get hyperlink\n name, url = get_soup_attribute(link, 'title'), get_soup_attribute(link, 'href')\n \n yield cls(url, name, datetime.strptime(date_string, '%Y.%m.%d'))\n\nclass JaiminIsBoxContentsResponse(ContentsResponseTemplate):\n @logger.wrap__name_during_entry('Parsing Contents Soup')\n def parse_soup(self, soup):\n parsed = {\n 'listings' : JaiminIsBoxContentsListing.from_soup(soup),\n 'name' : self._parse_name(soup)\n }\n \n aas = self._parse_author_artist_and_summary(\n soup.find('div', {'class':'info'})\n )\n \n parsed['authors'] = [aas['Author'], aas['Artist']]\n parsed['summary'] = aas['Synopsis']\n \n return parsed\n \n @logger.wrap_debug('Parsing Name')\n def _parse_name(self, soup):\n return soup.find('h1', {'class':'title'}).text.strip()\n \n @logger.wrap_debug('Parsing Authors')\n @cast_return(dict) # to dictionary\n @to_list # Cast Generator To List\n def _parse_author_artist_and_summary(self, info):\n begin_tag = info.find('b')\n title, contents = begin_tag.text, ''\n \n while begin_tag.nextSibling is not None:\n begin_tag = begin_tag.nextSibling\n \n if isinstance(begin_tag, Tag):\n if title.strip() != '': # when not empty\n yield (title, contents.strip('\\n'))\n \n title, contents = begin_tag.text, ''\n else:\n text = str(begin_tag)\n \n if str(begin_tag).startswith(': '):\n text = text[2:]\n \n contents += text + '\\n'","sub_path":"mangagrabber/sites/jaiminisbox/responses/contents.py","file_name":"contents.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"3675356","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /opt/miniconda3/envs/gmxbatch/lib/python3.7/site-packages/gmxbatch/system.py\n# Compiled at: 2020-03-15 09:03:11\n# Size of source mod 2**32: 3909 bytes\nfrom typing import List, Optional\nfrom .conffiles import Coordinates\nfrom .forcefields import ForceField\nfrom .indexgroups import IndexGroups\nfrom .intermolecularinteractions import IntermolecularInteractions\nfrom .moleculetype import MoleculeType\nfrom .templating import jinjaenv\n\nclass System:\n __doc__ = 'Represents the system studied by molecular dynamics.\\n\\n The most important attributes are:\\n - conf: the current state (conformation, coordinate set etc.), instance of gmxbatch.conffiles.Coordinates\\n - name: the name of this system (str)\\n - moleculetypes: ordered list of MoleculeType instances: describing the kind and count of molecules\\n - forcefield: force field settings (instance of ForceField)\\n - indexgroups: instance of IndexGroups\\n - intermolecularinteractions: instance of 
IntermolecularInteractions\\n '\n conf: Coordinates\n name: str\n moleculetypes: List[MoleculeType]\n forcefield: ForceField\n indexgroups: IndexGroups\n intermolecularinteractions: IntermolecularInteractions\n\n def __init__(self, name: str, forcefield: ForceField, conf: Coordinates, moleculetypes: List[MoleculeType], indexgroups: Optional[IndexGroups]=None):\n self.conf = conf\n self.name = name\n self.moleculetypes = moleculetypes\n self.forcefield = forcefield\n self.indexgroups = IndexGroups() if indexgroups is None else indexgroups\n self.intermolecularinteractions = IntermolecularInteractions()\n\n def writeTopology(self, topologyfile: str):\n \"\"\"Write the topology file of the present state.\n\n :param topologyfile: file name to write the topology to.\n :type topologyfile: str\n \"\"\"\n template = jinjaenv.get_template('topol.jinja2')\n with open(topologyfile, 'wt') as (f):\n for chunk in template.generate(param_itp=(self.forcefield.itp), moleculetypes=(self.moleculetypes), systemname=(self.name),\n moltype_itps={mt.itpfile for mt in self.moleculetypes},\n intermolecularinteractions=(self.intermolecularinteractions)):\n f.write(chunk)\n\n def _match_molecule(self, moltype: MoleculeType, atindex: int) -> int:\n \"\"\"See how many times a molecule definition fits into a coordinate set starting at a given index\n\n :param moltype: molecule definition\n :type moltype: MoleculeType\n :param atindex: start index of the coordinate set\n :type atindex: int\n :return: number of matches\n :rtype: int\n \"\"\"\n count = 0\n while atindex + len(moltype.atoms) > len(self.conf.atoms):\n return count\n for i, itpatom in enumerate(moltype.atoms):\n if itpatom.name != self.conf.atoms[(atindex + i)]['name'] or itpatom.resn != self.conf.atoms[(atindex + i)]['resn']:\n return count\n else:\n count += 1\n atindex += len(moltype.atoms)\n\n def countMolecules(self):\n \"\"\"Update the topology by counting the molecules in the coordinate set.\n \"\"\"\n matched_until = 0\n for mt in self.moleculetypes:\n matches = self._match_molecule(mt, matched_until)\n mt.count = matches\n matched_until += matches * len(mt.atoms)\n\n if matched_until < len(self.conf.atoms):\n raise ValueError('Ran out of molecule definitions while matching atoms in the coordinate set')","sub_path":"pycfiles/gmxbatch-0.0.2.dev0.linux-x86_64.tar/system.cpython-37.py","file_name":"system.cpython-37.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"206432927","text":"import math\nimport os\nfrom fnmatch import fnmatch\nfrom datetime import datetime\nfrom tqdm import tqdm\n\ndef fileload(filename):\n file_pth = os.path.dirname(__file__) + '/' + filename\n file_in = os.open(file_pth, os.O_BINARY)\n file_size = os.stat(file_in)[6]\n data = os.read(file_in, file_size)\n os.close(file_in)\n return data\n\ndef cal_pr(data):\n pro_dic = {}\n for i in tqdm(data):\n pro_dic[i] = data.count(i)\n sym_pro = []\n accum_pro = []\n keys = []\n accum_p = 0\n data_size = len(data)\n for k in sorted(pro_dic, key=pro_dic.__getitem__, reverse=True):\n sym_pro.append(pro_dic[k])\n keys.append(k)\n for i in sym_pro:\n accum_pro.append(accum_p)\n accum_p += i\n accum_pro.append(data_size)\n tmp = 0\n for k in sorted(pro_dic, key=pro_dic.__getitem__, reverse=True):\n pro_dic[k] = [pro_dic[k], accum_pro[tmp]]\n tmp += 1\n return pro_dic, keys, accum_pro \n\ndef encode(data, pro_dic):\n t_begin = datetime.now()\n print(\"Encoding begins.\")\n print(\"Please 
wait...\")\n C_up = 0\n A_up = A_down = C_down = 1\n data_size = len(data)\n for i in tqdm(range(data_size)): \n C_up = C_up * data_size + A_up * pro_dic[data[i]][1]\n C_down = C_down * data_size\n A_up *= pro_dic[data[i]][0]\n A_down *= data_size\n L = len(data) * math.log2(data_size) - math.log2(A_up)\n L = math.ceil(L)\n print(\"Generating codes...\")\n bin_C = float_dec2bin(C_up, C_down, L)\n acode = bin_C[0:L]\n #acode = acode.replace(acode[-1], '1')\n t_end = datetime.now()\n print(\"Encoding succeeded.\")\n print(\"Encoding lasts %d seconds.\" % (t_end - t_begin).seconds)\n return C_up, C_down, acode\n\ndef decode(C_up, C_down, pro_dic, keys, accum_pro, data_size):\n t_begin = datetime.now()\n print(\"Decoding begins.\")\n print(\"Please wait...\")\n byte_list = []\n for i in tqdm(range(data_size)):\n k = binarysearch(accum_pro, C_up * data_size / C_down)\n #if k == len(accum_pro) - 1:\n #k -= 1\n #print(len(keys))\n #print(k)\n key = keys[k]\n byte_list.append(key)\n C_up = C_up * data_size - C_down * pro_dic[key][1]\n C_down = C_down * data_size\n C_up *= data_size\n C_down *= pro_dic[key][0]\n t_end = datetime.now()\n print(\"Decoding succeeded.\")\n print(\"Decoding lasts %d seconds.\" % (t_end - t_begin).seconds)\n return byte_list\n\ndef binarysearch(pro_list, target):\n low = 0\n high = len(pro_list) - 1\n #print(target)\n #print(pro_list)\n #print(len(pro_list))\n if pro_list[0] <= target <= pro_list[-1]:\n while high >= low:\n middle = int((high + low) / 2)\n if (pro_list[middle] < target) & (pro_list[middle+1] < target):\n low = middle + 1\n elif (pro_list[middle] > target) & (pro_list[middle-1] > target):\n high = middle - 1\n elif (pro_list[middle] < target) & (pro_list[middle+1] > target):\n return middle\n elif (pro_list[middle] > target) & (pro_list[middle-1] < target):\n return middle - 1\n elif (pro_list[middle] < target) & (pro_list[middle+1] == target):\n return middle + 1\n elif (pro_list[middle] > target) & (pro_list[middle-1] == target):\n return middle - 1\n elif pro_list[middle] == target:\n return middle\n return middle\n else:\n return -1\n\ndef int_bin2dec(bins):\n dec = 0\n for i in range(len(bins)):\n dec += int(bins[i]) * 2 ** (len(bins) - i -1)\n return dec\n\ndef float_bin2dec(bins):\n dec_up = 0\n for i in range(len(bins)):\n dec_up += int(bins[i]) * 2 ** (len(bins) - i - 1) \n dec_down = 2 ** len(bins)\n return dec_up, dec_down\n \ndef float_dec2bin(x_up, x_down, L):\n bins = \"\"\n while ((x_up != x_down) & (len(bins) < L)):\n x_up *= 2\n if x_up > x_down:\n bins += \"1\"\n x_up -= x_down\n elif x_up < x_down:\n bins += \"0\"\n else:\n bins += \"1\"\n return bins\n\ndef filesave(data_after, filename):\n file_pth = os.path.dirname(__file__) + '/' + filename\n if (fnmatch(filename, \"*_encode.*\") == True):\n byte_list = []\n byte_num = int(len(data_after) / 8)\n #print(byte_num)\n for i in tqdm(range(byte_num)):\n byte_list.append(int_bin2dec(data_after[8*i:8*(i+1)]))\n #print(byte_list)\n #byte_dic = {}\n #for j in byte_list:\n # byte_dic[j] = byte_list.count(j)\n #print(byte_dic)\n file_open = os.open(file_pth, os.O_WRONLY | os.O_CREAT | os.O_BINARY)\n #print(len(bytes(byte_list)))\n os.write(file_open, bytes(byte_list))\n os.close(file_open)\n return byte_num\n else:\n file_open = os.open(file_pth, os.O_WRONLY | os.O_CREAT | os.O_BINARY)\n os.write(file_open, data_after)\n os.close(file_open)\n\ndef code_efficiency(pro_dic, data_size, bit_num):\n entropy = 0\n for k in pro_dic.keys():\n entropy += (pro_dic[k][0] / data_size) * 
(math.log2(data_size) - math.log2(pro_dic[k][0]))\n    #print(entropy)\n    ave_length = bit_num / data_size\n    #print(ave_length)\n    code_efficiency = entropy / ave_length\n    print(\"The coding efficiency is %.2f%%\" % (code_efficiency * 100))\n\ndef acode():\n    filename = [\"诺贝尔化学奖\", \"脑机接口新突破\"]\n    filetype = [\".txt\", \".docx\"]\n    for i in range(1, len(filename)):\n        print(40 * \"-\")\n        print(\"Loading file:\", filename[i] + filetype[i])\n        data = fileload(filename[i] + filetype[i])\n        data_size = len(data)\n        pro_dic, keys, accum_pro = cal_pr(data)\n        acode_ls = \"\"\n        C_up, C_down, acode = encode(data, pro_dic)\n        acode_ls += acode\n        if len(acode_ls) % 8 != 0:\n            #print(len(acode_ls) % 8)\n            tmp = acode_ls[-(len(acode_ls) % 8):].zfill(8)\n            acode_ls = acode_ls[0:-(len(acode_ls) % 8)]\n            acode_ls += tmp\n            #print(len(acode_ls))\n        #acode_ls[-(len(acode_ls) % 8) :].zfill(8)\n        #print(len(acode_ls))\n        print(\"Saving encoding file...\")\n        codebyte_num = filesave(acode_ls, filename[i]+'_encode.acode')\n        print(\"Encoding file has been saved.\")\n        print(\"The compressing rate is %.2f%%\" % ((codebyte_num / data_size) * 100))\n        code_efficiency(pro_dic, data_size, len(acode_ls))\n        print()\n\n        print(\"Loading file:\", filename[i] + '_encode.acode')\n        codes = fileload(filename[i]+'_encode.acode')\n        #print(len(codes))\n        bitstream = \"\"\n        for code in codes:\n            bitstream += bin(code)[2:].zfill(8)\n        #print(new_dic)\n        #print(len(bitstream))\n        C_up, C_down = float_bin2dec(bitstream)\n        decodebyte_ls = decode(C_up, C_down, pro_dic, keys, accum_pro, data_size)\n        errornum = 0\n        for j in range(data_size):\n            if data[j] != decodebyte_ls[j]:\n                errornum += 1\n                print(j, data[j], decodebyte_ls[j])\n        print(\"Error byte num:\", errornum)\n        print(\"Saving decoding file...\")\n        filesave(bytes(decodebyte_ls), filename[i] + '_decode'+ filetype[i])\n        print(\"Decoding file has been saved.\")\n\nif __name__ == \"__main__\":\n    acode()","sub_path":"v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":7356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"61003096","text":"\n\nclass Pizza:\n    def __init__(self, id=None, name=None, price=None, ingredients=None, img=None):\n        self.id = id\n        self.name = name\n        self.price = price\n        self.ingredients = ingredients\n        self.img = img\n\n    @classmethod\n    def load(cls, obj):\n        pizza = Pizza(id=obj['id'],\n                      name=obj['name'],\n                      price=obj['price'],\n                      ingredients=obj['ingredients'],\n                      img=obj['img']\n                      )\n        return pizza\n\n    def serialize(self):\n        return {\n            'id': self.id,\n            'name': self.name,\n            'price': self.price,\n            'ingredients': self.ingredients,\n            'img': self.img,\n        }\n\n","sub_path":"pizza.py","file_name":"pizza.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"409623185","text":"\"\"\"\nGiven a singly linked list where the elements are sorted in ascending order, convert it to a height-balanced binary search tree.\n\nHere, a height-balanced binary tree is one in which the depths of the two subtrees of every node differ by at most 1.\n\nExample:\n\nGiven the sorted linked list: [-10, -3, 0, 5, 9],\n\nOne possible answer is: [0, -3, 9, -10, null, 5], which represents the following height-balanced binary search tree:\n\n      0\n     / \\\n   -3   9\n   /   /\n -10  5\n\n\"\"\"\n\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    # use fast and slow pointers to locate the middle node of the linked list\n    def findMiddle(self, head):\n        # pre points to the last node of the first half\n        pre = None\n        # slow pointer\n        slow = head\n        # fast pointer\n        fast = head\n\n        while fast and fast.next:\n            pre = slow\n            # the slow pointer advances one step at a time\n            slow = slow.next\n            # the fast pointer advances two steps at a time\n            fast = fast.next.next\n        # when the loop ends, fast has reached the end and slow is at the middle\n        if pre:\n            # pre's next node is slow; point pre.next to None to split the list\n            pre.next = None\n        # slow now points to the middle node of the list\n        return slow\n\n    def sortedListToBST(self, head: ListNode) -> TreeNode:\n        # check whether head exists\n        if not head:\n            return None\n        # call findMiddle to get the middle node, named mid\n        mid = self.findMiddle(head)\n        # create a tree node whose value is mid's value\n        node = TreeNode(mid.val)\n        # check whether head equals mid, i.e. the special case of a list of length one\n        if head == mid:\n            return node\n        # recurse: the left subtree is built from head\n        node.left = self.sortedListToBST(head)\n        # recurse: the right subtree is built from mid.next\n        node.right = self.sortedListToBST(mid.next)\n\n        return node\n\n\nclass Solution2:\n    def mapListToValues(self, head):\n        \"\"\"Convert the linked list to a list to quickly reach the middle node\"\"\"\n        vals = []\n        while head:\n            vals.append(head.val)\n            head = head.next\n        return vals\n\n    def sortedListToBST(self, head):\n\n        values = self.mapListToValues(head)\n\n        def convertListToBST(l, r):\n            # case where l (left) is greater than r (right)\n            if l > r:\n                return None\n            # otherwise, compute the position of the middle node\n            mid = (l + r) // 2\n            # initialize the root node with the value at mid\n            node = TreeNode(values[mid])\n            # case where l == r\n            if l == r:\n                return node\n            # recursive calls\n            node.left = convertListToBST(l, mid - 1)\n            node.right = convertListToBST(mid + 1, r)\n            return node\n\n        return convertListToBST(0, len(values) - 1)\n","sub_path":"链表/109-有序链表转换为二叉搜索树.py","file_name":"109-有序链表转换为二叉搜索树.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"474350618","text":"from app import app\nfrom flask import url_for, redirect, request, render_template, Markup\nimport markdown\nfrom loader import DataStore\nfrom config import *\nfrom helper import hash_check\n\n# Initialize loading files from the git repo\nDS = DataStore()\n\n\"\"\"\nWhen triggered by the Github repo webhook, verifies if the request is valid and, if it is, the data stored inside the git repo is pulled down and the DataStore is reloaded\n\"\"\"\n@app.route('/_reload', methods=['POST'])\ndef reload_data():\n\tif request.headers.get('X-Hub-Signature', None) != None:\n\t\tgh_sha1 = request.headers.get('X-Hub-Signature')\n\t\tgh_payload = request.data\n\t\tsecret = GIT_REPO_SECRET\n\t\tif hash_check(gh_sha1, gh_payload, secret) == True:\n\t\t\tDS.reload()\n\t\treturn redirect(url_for('index'))\n\telse:\n\t\tprint('Received a post request to reload, but not from github')\n\t\treturn redirect(url_for('index'))\n\n\n\n\"\"\"\nIndex view, a.k.a /root\n\"\"\"\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n\n\n\"\"\"\n\tRedirects to Resume.pdf hosted on Dropbox\n\"\"\"\n@app.route('/resume')\ndef resume():\n\t# return render_template('resume.html')\n\treturn redirect('https://www.dropbox.com/s/1fm7p9kr8mjbz8m/resume.pdf?dl=0')\n\n\n\n\"\"\"\n\tList of completed and ongoing projects\n\"\"\"\n@app.route('/projects')\ndef projects_page():\n\treturn redirect('https://github.com/snehesht')\n\t# return render_template('projects.html')\n\n\n\n\"\"\"\n\tAbout myself\n\"\"\"\n@app.route('/about')\ndef about_page():\n\treturn render_template('about.html')\n\n\n\"\"\"\n\tIndex of all blogposts\n\"\"\"\n@app.route('/blog/')\n@app.route('/blog')\ndef blog_index_page():\n\tdata = DS.get_metadata()\n\treturn render_template('blog.html', content=data)\n\n\n@app.route('/blog/<url_slug>/')\n@app.route('/blog/<url_slug>')\ndef blog_post(url_slug):\n\tdata = DS.get_data()\n\tmetadata = DS.get_metadata()\n\tbp = {}\n\ttry:\n\t\tfor item in metadata:\n\t\t\t# Search for metadata of the blogpost with that url\n\t\t\tif item['url'] == 
url_slug:\n\t\t\t\tbp = item\n\texcept Exception as e:\n\t\t# No such blogpost\n\t\treturn redirect(url_for('index'))\n\tfinally:\n\t\traw_content = data[bp['file']]\n\t\tprocessed_content = Markup(markdown.markdown(raw_content))\n\t\treturn render_template('blog_post.html', content=processed_content,metadata=bp)","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"322660074","text":"from models.user import User\nimport db\nfrom flask import Flask, request, json, make_response\nfrom scraper import * # Web Scraping utility functions for Online Clubs with Penn.\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef main():\n    return \"Welcome to Penn Club Review!\"\n\n\n@app.route('/api')\ndef api():\n    return \"Welcome to the Penn Club Review API!\"\n\n\n@app.route('/api/clubs', methods=[\"GET\"])\ndef read_clubs():\n    clubs = db.get_clubs()\n    if clubs is not None:\n        return json.jsonify(clubs)\n    else:\n        return \"No clubs found\", 400\n\n\n@app.route('/api/clubs', methods=[\"POST\"])\ndef create_one_club():\n    msg = db.create_one_club(request.form)\n    if msg == \"success\":\n        return msg, 201\n    else:\n        return msg, 400\n\n\n@app.route('/api/clubs/similar', methods=[\"POST\"])\ndef get_similar_clubs():\n    if \"club_name\" in request.form:\n        n = request.form[\"n\"] if \"n\" in request.form else 3\n        msg = db.get_similar_club(request.form[\"club_name\"], n)\n        if type(msg) == str:\n            return msg, 400\n        else:\n            return json.jsonify(msg)\n    return \"club_name is required\", 400\n\n\n@app.route('/api/user/<username>', methods=[\"GET\"])\ndef read_one_user(username):\n    user_data = db.get_user(username)\n    if type(user_data) == str:\n        return user_data, 400\n    else:\n        return json.jsonify(user_data)\n\n\n@app.route(\"/api/favorite\", methods=[\"POST\"])\ndef favorite():\n    if \"username\" in request.form:\n        if \"club_name\" in request.form:\n            msg = db.favorite(request.form[\"username\"], request.form[\"club_name\"])\n            if msg == \"success\":\n                return msg, 200\n            else:\n                return msg, 400\n        return \"club_name is required to favorite\", 400\n    if \"club_name\" in request.form:\n        return \"username is required to favorite\", 400\n\n    return \"username and club_name are required to favorite\", 400\n\n\n@app.route(\"/api/unfavorite\", methods=[\"POST\"])\ndef unfavorite():\n    if \"username\" in request.form:\n        if \"club_name\" in request.form:\n            msg = db.unfavorite(request.form[\"username\"], request.form[\"club_name\"])\n            if msg == \"success\":\n                return msg, 200\n            else:\n                return msg, 400\n        return \"club_name is required to unfavorite\", 400\n\n    if \"club_name\" in request.form:\n        return \"username is required to unfavorite\", 400\n\n    return \"username and club_name are required to unfavorite\", 400\n\n\nif __name__ == '__main__':\n    db.populate()\n    
app.run(port=3000)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50731293","text":"'''\nCode to compare multiple fits from Simmulated Annealing together.\n\nCan get the distributions/boxplots of different parameters amongst the different runs\n\nAlso built-in a sweep to find the bounds for the internal forward rate based on the averaged (free-)energy landscape\n'''\nimport importlib as imp\nimport matplotlib.pylab as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nsns.set_style('ticks');\ncurrent_colors = sns.color_palette()\n\nimport sys\nsys.path.append('../code_general/')\nfrom read_model_ID import unpack_parameters\nimport CRISPR_free_energy_landscape_Diewertje as FreeEnergy\nimp.reload(FreeEnergy);\nsys.path.append('../code_Boyle/')\nimport plotting_Boyle_Diewertje as plt_B\nimport CRISPR_dCas9_binding_curve_Boyle as dCas9\nimp.reload(dCas9);\n\nfrom scipy import optimize\nimport Boyle_data_processing as Bdata\nimp.reload(Bdata);\n\n#import CRISPR_dCas9_binding_curve_Boyle as dCas9\n#reload(dCas9);\nsys.path.append('../code_Pclv/')\nimport CRISPR_Kinetic_model_Diewertje as Pclv\nimp.reload(Pclv);\n\nimport get_parameters_fit_Diewertje as gpf\n\nimport Weighted_Average_Diewertje as WA\n\nimport get_parameters_fit_Diewertje as getParm\n\n\n########################\n\ndef P(kon,koff,t):\n return kon/(kon+koff) * (1 - np.exp(-(kon+koff)*t))\n\ndef least_squares_line_through_origin(x_points, y_points):\n return np.sum( x_points*y_points )/np.sum( x_points*x_points )\n\n\ndef association(kon,koff):\n times = np.array([0.5,1.0,1.5])\n fluoresence = P(kon,koff,times)\n k_asso = least_squares_line_through_origin(x_points=times, y_points=fluoresence)\n return k_asso\n\ndef find_on_target(kon,k_asso):\n return k_asso - association(kon,koff=0.0)\n\ndef calc_on_rate():\n '''\n using the measured association rate for the on-target, invert the relation between association and on-rate\n through root-finding.\n\n uses 'brentq()' algorithm from scipy to find the root(s).\n\n :return: estimated on-target on rate\n '''\n\n BoyleData = Bdata.read()\n OnTarget = BoyleData[BoyleData.MM_pos.apply(len)==0]\n asso_rate_OT = float(OnTarget.on_slope)*1000\n kOT = optimize.brentq(find_on_target,a=0.01, b=1.0,args=(asso_rate_OT))\n return kOT/1000.\n\n\ndef process_SA_fits(simset,Nparams=44, model_id='init_limit_general_energies_v2'):\n '''\n Load the desired fits and store into dataframes:\n 1. the value of epsilon_C at every position\n 2. the value of epsilon_I at every position\n 3. the value of the energy_landscape at every position\n 4. the bound free-energy for the on-target at every position\n 5. the estimated on-rate for the on-target. 
Both with and without assuming the solution and PAM have equillibrated\n\n :param simset: Let every entry from simset be a filename including path\n\n :return: 6 separate pandas dataframes\n '''\n\n\n # prepare some stuff for the data frames:\n column_names = ['sim', 'PAM']\n for n in range(1, 21):\n column_names.append(str(n))\n\n match_rows = []\n mismatch_rows = []\n rates_rows = []\n landscape_rows = []\n FreeEnergy_rows = []\n fast_Rloop_rows = []\n\n for filename in simset:\n parameters = gpf.load_simm_anneal(filename, Nparams)\n #parameters[0]=3.5 # If we want to fix epsilon PAM\n epsilon, fwd_rates = unpack_parameters(parameters, model_id)\n Energy_landscape, FreeEnergy_landscape = FreeEnergy.plot_free_energy_landscape(parameters, model_id,\n show_plot=False);\n\n match_data = {}\n match_data['sim'] = filename\n for i, key in enumerate(column_names[1:]):\n match_data[key] = epsilon[i]\n match_rows.append(match_data)\n\n mismatch_data = {}\n mismatch_data['sim'] = filename\n for i, key in enumerate(column_names[2:]):\n mismatch_data[key] = epsilon[21 + i]\n mismatch_rows.append(mismatch_data)\n\n rates_data = {}\n rates_data['sim'] = filename\n rates_data['R1_to_PAM'] = fwd_rates[1] * np.exp(-epsilon[1])\n rates_data['PAM_to_sol'] = fwd_rates[0] * np.exp(epsilon[0])\n for i, key in enumerate(['sol_to_PAM', 'PAM_to_R1', 'internal']):\n rates_data[key] = fwd_rates[i]\n rates_rows.append(rates_data)\n\n landscape_data = {}\n landscape_data['sim'] = filename\n for i, key in enumerate(['sol'] + column_names[1:]):\n landscape_data[key] = Energy_landscape[i]\n landscape_rows.append(landscape_data)\n\n FreeEnergy_data = {}\n FreeEnergy_data['sim'] = filename\n for i, key in enumerate(column_names[1:]):\n FreeEnergy_data[key] = FreeEnergy_landscape[i]\n FreeEnergy_rows.append(FreeEnergy_data)\n\n rate_to_cleave = 10 ** 9\n Delta = Pclv.translate_binding_to_cleaving(parameters, model_id, rate_to_cleave, mismatch_positions=[])\n P = Pclv.Pclv(Delta[1:])\n kPR = fwd_rates[1] * P\n kSP = 0.1 * fwd_rates[0]\n kPS = kSP * np.exp(epsilon[0] + np.log(10.0))\n k_OT = kPR * kSP / (kPR + kSP + kPS)\n kinetic = k_OT * 1000\n\n # Assume PAM and solution equillibrates: Initiating the R-loop is limmiting\n thermodynamicPAM = kPR / (1.0 + np.exp(+epsilon[0] + np.log(10.0))) * 1000\n\n # Assume that PAM and (first) R-loop state equillibrate: Binding from solution is limmiting\n\n\n P2 = Pclv.Pclv(Delta[2:])\n kf = fwd_rates[2]\n E1 = epsilon[1]\n k_OT = (kSP * kf*P2/np.exp(E1))/(kSP + kPS + kf*P2/np.exp(E1))\n thermodynamicR = k_OT*1000\n\n fast_Rloop_data = {}\n fast_Rloop_data['sim'] = filename\n fast_Rloop_data['kinetic'] = kinetic\n fast_Rloop_data['eq_PAM'] = thermodynamicPAM\n fast_Rloop_data['eq_PR'] = thermodynamicR\n fast_Rloop_rows.append(fast_Rloop_data)\n\n matches = pd.DataFrame(match_rows, columns=column_names)\n mismatches = pd.DataFrame(mismatch_rows, columns=['sim'] + [str(i) for i in range(1, 21)])\n rates = pd.DataFrame(rates_rows, columns=['sim', 'sol_to_PAM', 'PAM_to_sol', 'PAM_to_R1', 'R1_to_PAM', 'internal'])\n landscape = pd.DataFrame(landscape_rows, columns=['sim', 'sol'] + column_names[1:])\n free_energy = pd.DataFrame(FreeEnergy_rows, columns=column_names)\n fast_Rloop = pd.DataFrame(fast_Rloop_rows, columns=['sim', 'kinetic', 'eq_PAM','eq_PR'])\n\n matches.set_index('sim', inplace=True)\n mismatches.set_index('sim', inplace=True)\n rates.set_index('sim', inplace=True)\n landscape.set_index('sim', inplace=True)\n free_energy.set_index('sim', inplace=True)\n 
fast_Rloop.set_index('sim', inplace=True)\n return matches, mismatches, rates, landscape, free_energy, fast_Rloop\n\n\ndef average_solution(simset):\n '''\n use some identifiers to choose the simulations I want to average over\n\n to easily toggle between sims in different folders maybe make a variable 'sim_set'\n and use:\n for sim in sim_set:\n load simulation\n ...\n '''\n matches, mismatches, rates, landscape, free_energy, fast_Rloop = process_SA_fits(simset)\n\n # 1) get average energy landscape:\n landscape_avg = np.array(landscape.mean())\n\n # 2) get average mismatch penalties:\n epsI_avg = np.array(mismatches.mean())\n\n # 3) get average on-target binding rate:\n kOT_avg = float(fast_Rloop.mean()['kinetic'])\n\n return landscape_avg, epsI_avg, kOT_avg/1000.0\n\n\ndef median_solution(simset):\n '''\n use some identifiers to choose the simulations I want to average over\n\n to easily toggle between sims in different folders maybe make a variable 'sim_set'\n and use:\n for sim in sim_set:\n load simulation\n ...\n '''\n matches, mismatches, rates, landscape, free_energy, fast_Rloop = process_SA_fits(simset)\n\n # 1) get average energy landscape:\n landscape_avg = np.array(landscape.median())\n\n # 2) get average mismatch penalties:\n epsI_avg = np.array(mismatches.median())\n\n # 3) get average on-target binding rate:\n kOT_avg = float(fast_Rloop.median()['kinetic'])\n # kOT_avg = float(fast_Rloop.median()['thermodynamic'])\n\n return landscape_avg, epsI_avg, kOT_avg/1000.0\n\n\n\n\ndef calc_rate_PAM_to_Rloop(energy_landscape, kf, kOT, kSP=1000.0):\n '''\n Get kPR from kOT and the two other fitted forward rates.\n\n Adjusted to use exact equation involving both kf and kSP\n :param energy_landscape:\n :param kf:\n :param kOT:\n :param kSP:\n :return:\n '''\n\n\n Epsilon = np.diff(energy_landscape)\n Delta = -np.diff(energy_landscape)\n Delta[0] *= -1\n\n E_SP = Epsilon[0]\n # ---- associaton rate data is taken at 1nM, we report the parameter values at 10nM ----\n kSP *= 0.1\n Kd = np.exp(+E_SP + np.log(10.0))\n\n P2 = Pclv.Pclv(Delta[2:])\n c = kOT * (1 + Kd) /(1 - kOT/kSP)\n E_RP = Epsilon[1]\n alpha = 1.0 /(kf * P2) * np.exp(+E_RP) * c\n kPR = c * (1 - alpha) ** (-1)\n return kPR , alpha\n\n\ndef test_rates(kf, kOT, energy_landscape, epsilon_I,\n xdata, ydata, yerr, model_id='init_limit_general_energies_v2',\n kSP=1000.0):\n '''\n calculate the chi-squared value based on the single-mismatch association rate data\n for a set of particular values of the internal forward rate and the rate from solution to PAM\n '''\n\n # 1) Use kOT, the energy landscape, kf and kSP to determine kPR:\n kPR, _ = calc_rate_PAM_to_Rloop(energy_landscape, kf, kOT, kSP)\n\n # 2) Calculate chi-squared with single mismatch off-targets:\n Epsilon = np.diff(energy_landscape)\n Epsilon[1:] *= -1\n\n new_parameters = list(Epsilon) + list(epsilon_I)\n\n new_parameters.append(np.log10(kSP))\n new_parameters.append(np.log10(kPR))\n new_parameters.append(np.log10(kf))\n new_parameters = np.array(new_parameters)\n\n ONtarget_occupancy = 1.0\n\n V = 0\n for i in range(len(xdata)):\n V += dCas9.calc_Chi_square(new_parameters, xdata[i], ydata[i], yerr[i], ONtarget_occupancy,\n model_id=model_id)\n\n return V, new_parameters\n\n\ndef optimize_internal_forward_rate(simset, forward_rates, mode='mean',kSP=1000.0):\n xdata, ydata, yerr = Bdata.prepare_multiprocessing(use_single_mm_only=True,\n use_on_rate=True,\n use_off_rate=False,\n use_occupancy=False)\n\n if mode == 'mean':\n landscape_avg, epsI_avg, kOT = 
average_solution(simset)\n elif mode =='median':\n landscape_avg, epsI_avg, kOT = median_solution(simset)\n\n # kOT = calc_on_rate()\n _, lower_bnd_kf = calc_rate_PAM_to_Rloop(kSP=kSP, kf=1.0, kOT=kOT, energy_landscape=landscape_avg)\n V = []\n kf_vals = []\n for kf in forward_rates:\n if kf < lower_bnd_kf:\n continue\n kf_vals.append(kf)\n v, _ = test_rates(kf=kf, kSP=kSP,\n kOT=kOT,\n energy_landscape=landscape_avg,\n epsilon_I=epsI_avg,\n xdata=xdata,\n ydata=ydata,\n yerr=yerr)\n V.append(v)\n\n\n # Now find the optimum:\n kf_opt = kf_vals[np.argmin(V)]\n V_opt, parameters_opt = test_rates(kf=kf_opt,\n kSP=kSP,\n kOT=kOT,\n energy_landscape=landscape_avg,\n epsilon_I=epsI_avg,\n xdata=xdata,\n ydata=ydata,\n yerr=yerr)\n\n return V, kf_vals, kf_opt, parameters_opt, V_opt\n\n\ndef grid_search_forward_rates(simset, int_forward_rates, sol_to_PAM_rates,\n save_to_file = True,\n mode='median',\n today='30/11/2018'):\n '''\n 2D grid search to find both internal forward rate and rate from solution to PAM based on the average landscape\n '''\n\n # --- for every value of kSP, perform a 1D grid search along kf ----\n\n grid_V = np.nan * np.ones((len(int_forward_rates), len(sol_to_PAM_rates)))\n grid_kfvals = np.nan * np.ones((len(int_forward_rates), len(sol_to_PAM_rates)))\n\n # partial optimum along kf:\n grid_kfopt = []\n grid_Vopt = []\n grid_parameters = []\n for i, kSP in enumerate(sol_to_PAM_rates):\n V, kf_vals, kf_opt, parameters_opt, V_opt = optimize_internal_forward_rate(simset, int_forward_rates, mode,\n kSP=kSP)\n\n grid_V[-len(V):, i] = V\n grid_kfvals[-len(kf_vals):, i] = kf_vals\n grid_kfopt.append(kf_opt)\n grid_Vopt.append(V_opt)\n grid_parameters.append(parameters_opt)\n\n # --- find optimum on grid -----\n Vopt = np.min(grid_Vopt)\n kf_opt = grid_kfopt[np.argmin(grid_Vopt)]\n kSP_opt = sol_to_PAM_rates[np.argmin(grid_Vopt)]\n parameters_opt = grid_parameters[np.argmin(grid_Vopt)]\n\n if save_to_file:\n #---- store parameters into text file ----\n fit_info = str(len(simset)) + ' in total , folder 25_10_2018/sims: 1 - 150 & 19_10_2018'\n model_id = 'init_limit_general_energies_v2'\n file_params = '../data/25_10_2018/' + mode + '_landscape_Boyle_2Dgrid.txt'\n write_parameters(parameters_opt, model_id, file_params, today, fit_info, mode)\n\n # ---- store grid points into Excel file ------\n filename = '../data/25_10_2018/grid_search_' + mode + today.replace('/','_') +'.xlsx'\n write_grid(grid_V, grid_kfvals, Vopt, kf_opt, kSP_opt, int_forward_rates, sol_to_PAM_rates, filename)\n\n\n return grid_V, grid_kfvals, Vopt, kf_opt, kSP_opt, parameters_opt\n\n\ndef write_parameters(parameters, model_id, filename,\n today, fit_info, mode):\n\n O = open(filename, 'w')\n O.write('# date of modification: ' + today + '\\n')\n O.write('# model ID: '+ model_id + '\\n')\n O.write('# SA fits used: ' + fit_info + '\\n' )\n O.write('# analysis: '+ mode + '\\n')\n\n for param in parameters:\n O.write(str(param) + '\\n')\n O.close()\n return\n\n\ndef write_grid(V, kf_vals, Vopt, kf_opt, kSP_opt, forward_rates,binding_rates, filename):\n # ------ calculated chi-squared values on grid points ------------\n df = pd.DataFrame(V)\n convert_col_names = {}\n for i in range(len(binding_rates)):\n convert_col_names[i] = binding_rates[i]\n\n convert_row_names = {}\n for j in range(len(forward_rates)):\n convert_row_names[j] = forward_rates[j]\n df.rename(index=convert_row_names, columns=convert_col_names, inplace=True)\n\n # --- accepted forward rates -------\n df2 = pd.DataFrame(kf_vals)\n 
df2.rename(index=convert_row_names, columns=convert_col_names, inplace=True)\n\n # ----- optimal set of kf and kSP on the grid ------\n df3 = pd.DataFrame()\n df3['kf'] = [kf_opt]\n df3['kSP'] = [kSP_opt]\n df3['V'] = [Vopt]\n\n # ----- save it all into an excel file -----\n ExcelFile = pd.ExcelWriter(path=filename)\n df.to_excel(ExcelFile, sheet_name='chi_squared')\n df2.to_excel(ExcelFile, sheet_name='forward_rates')\n df3.to_excel(ExcelFile, sheet_name='optimum')\n ExcelFile.save()\n ExcelFile.close()\n return\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n###############################################################\ndef replace_lower_triangle(M):\n '''\n sets the values in the lower triangle of matrix M equal to 1.\n The model predictions contain both mismatch config (i,j) and (j,i).\n Setting half of the association rates equal to 1.0 will prevent those duplicates\n from contributing to the selection done below\n :param M:\n :return:\n '''\n K = M\n for i in range(len(M)):\n for j in range(len(M)):\n if i > j:\n K[i, j] = 1\n return K\n\n\n\ndef difference_model_predictions(model, reference):\n '''\n Calculate average distance between two model predictions per datapoint\n :param model:\n :param reference:\n :return:\n '''\n reference = replace_lower_triangle(reference)\n model = replace_lower_triangle(model)\n N = len(reference)\n total_nmbr_of_points = N * (N + 1) * 0.5\n sum_difference = np.sum(np.abs(model - reference) / (reference))\n diff = sum_difference/total_nmbr_of_points\n return diff\n\ndef select_on_prediction_WA(simset,\n percentage=0.1,\n Nparams=43,\n model_id='general_energies_no_kPR',\n path='../Data_Boyle/',\n replica='1'):\n # in my case replica is filename!\n scores = []\n for sim in simset:\n #print sim\n parameters = getParm.load_simm_anneal(sim, Nparams)\n score, _, _ = WA.predict_train(parameters, model_id=model_id, path=path, replica=replica, Plot=False)\n scores.append(score)\n scores = np.array(scores)\n simset = np.array(simset)\n selected_scores = scores[scores <= percentage]\n selected_sims = simset[scores <= percentage]\n return selected_sims, selected_scores, scores\n\ndef select_on_prediction(simset, chi_squared, percentage,\n Nparams=44,\n model_id='init_limit_general_energies_v2',\n precalculated=False, score=None,\n save_scores=True, filename='select_with_predcitions.txt'\n ):\n '''\n Select those solutions that whose model prediction on the training data differs no more than x% from\n the prediction belonging to the solution with the lowest chi-squared value in the set of simulations.\n '''\n if not precalculated:\n # ----- Start selection: Retrieve the best fit first --------\n chi_squared = np.array(chi_squared)\n best_fit = simset[np.argmin(chi_squared)]\n parameters = gpf.load_simm_anneal(best_fit, Nparams)\n _, model_best, _ = plt_B.calc_predictions(parameters=parameters, model_id=model_id)\n # model_best = replace_lower_triangle(model_best)\n \n # ----- Compare difference in model prediction to this best fit ---\n score = []\n for sim in simset:\n parameters = gpf.load_simm_anneal(sim, Nparams)\n _, model, _ = plt_B.calc_predictions(parameters=parameters, model_id=model_id)\n diff = difference_model_predictions(model, model_best)\n score.append( diff )\n score = np.array(score)\n \n # ----- select simulations whose difference in predicted values differs less then x% from the best fit ----\n selected_scores = score[score <= percentage]\n \n simset = np.array(simset)\n selected_sims = simset[score <= percentage]\n \n # ---- return selected_sims and 
selected_scores ------\n    if save_scores:\n        np.savetxt(filename, score)\n    \n    return selected_sims, selected_scores, score\n\n\n\ndef select_on_chi2(Chi2, simset, percentage=0.05):\n    '''\n    Select those solutions whose chi-squared differs no more than x% from\n    the lowest chi-squared value in the set of simulations.\n    '''\n    # ---- find minimum value of Chi-squared amongst replicates ---\n    best_solution = min(Chi2)\n\n    # ---- select those simulations with a chi2 that differs no more than x% from the best solution ---\n    selected_solutions = Chi2[Chi2 <= ((1 + percentage) * best_solution)]\n\n    # ----- select the simulations files ----\n    simset = np.array(simset)\n    selected_sims = simset[Chi2 <= ((1 + percentage) * best_solution)]\n    return selected_solutions, selected_sims, (1 + percentage) * best_solution\n\ndef Tukey_outlier_test(data, simset, k=1.5):\n    '''\n    Use 'Tukey outlier test' to filter outliers from the dataset\n\n    protocol:\n    1. Assume the data to be Gaussian distributed (null-hypothesis)\n    2. A datapoint is NOT an outlier if it lies within [Q1 - k*IQR, Q3 + k*IQR], with Q1/Q3 the 25th/75th percentiles\n    3. p-value is the probability of obtaining a datapoint outside this interval. Prob. to reject the null-hypothesis.\n    '''\n\n    # ---- Tukey's test says: x in [Q1-1.5*IQR, Q3+1.5*IQR], with IQR=Q3-Q1 ----\n    Q1 = np.percentile(data, 25)\n    Q3 = np.percentile(data, 75)\n    IQR = Q3 - Q1\n    low = Q1 - k * IQR\n    high = Q3 + k * IQR\n\n    # ---- Choose the datapoints ---\n    Tukey_data = data[(data <= high) & (data >= low)]\n\n    # ---- select corresponding simulations -----\n    simset = np.array(simset)\n    Tukey_sims = simset[(data <= high) & (data >= low)]\n    return Tukey_data, Tukey_sims, low, high\n\n\n","sub_path":"Diewertje/analysis_SA_fits_Diewertje.py","file_name":"analysis_SA_fits_Diewertje.py","file_ext":"py","file_size_in_byte":20647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"296829364","text":"from blog.forms import EntryForm, TagForm\nfrom django.http import Http404\nfrom django.shortcuts import render_to_response, redirect, get_object_or_404\nfrom django.template.context import RequestContext\nfrom blog.models import Entry, Tag\nfrom base.middleware import blogAttributes\nfrom django.contrib import messages, auth\nfrom django.contrib.auth.decorators import login_required\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\ndef blog_view(request):\n    \"\"\"\n    base view for the blog\n    :param request:\n    :return:\n    \"\"\"\n    options = blogAttributes()\n    options['entries'] = Entry.objects.all()\n    return render_to_response('blog.html', options, context_instance=RequestContext(request))\n\n\ndef blog_entries_by_tag_view(request,tag):\n    \"\"\"\n    shows the user all the entries with a specified tag\n    :param request:\n    :param tag:\n    :return:\n    \"\"\"\n    options = blogAttributes()\n    # substring match on the comma-separated tags string (may also match partial tag names)\n    options['entries'] = Entry.objects.filter(tags__contains=tag)\n    options['tag'] = tag\n    return render_to_response('blog.html', options, context_instance=RequestContext(request))\n\n\ndef blog_entry_view(request, titleSlug=None, eID=None):\n    \"\"\"\n    displays a single blog entry\n    :param request:\n    :return:\n    \"\"\"\n    options = blogAttributes()\n    if titleSlug is not None:\n        options['entry'] = get_object_or_404(Entry, title_slug=titleSlug)\n    elif eID is not None:\n        options['entry'] = get_object_or_404(Entry, id=eID)\n    else:\n        raise Http404\n    return render_to_response('entry.html', options, 
context_instance=RequestContext(request))\n\n@login_required\ndef blog_entry_edit_view(request, eID):\n    \"\"\"\n    lets superusers edit a blog entry\n    :param request:\n    :return:\n    \"\"\"\n    options = blogAttributes()\n    entry = get_object_or_404(Entry, id=eID)\n    options['entry'] = entry\n    options['form'] = EntryForm(instance=entry)\n    if request.POST and request.method == 'POST':\n        entryForm = EntryForm(request.POST, instance=get_object_or_404(Entry, id=eID))\n        if entryForm.is_unique(request, entry):\n            if entryForm.has_changed():\n                entry = entryForm.customSave(request.user)\n                # loop through the tags\n                if len(entry.tags) > 0:\n                    tags = entry.tags.split(',')\n                    for tag in tags:\n                        # if the tag doesn't exist\n                        if not Tag.objects.filter(name=tag).exists():\n                            # save the tag\n                            t = Tag()\n                            t.name = tag\n                            t.save()\n                messages.add_message(request, messages.SUCCESS, 'The Entry has been updated')\n                return redirect(blog_entry_view, titleSlug=entry.title_slug)\n            else:\n                messages.add_message(request, messages.INFO, 'No changes have been made')\n        else:\n            messages.add_message(request, messages.ERROR, 'An Entry with this Title already exists')\n\n    return render_to_response('entryForm.html', options, context_instance=RequestContext(request))\n\n@login_required\ndef blog_entry_add_view(request):\n    \"\"\"\n    lets superusers add a blog entry\n    :param request:\n    :return:\n    \"\"\"\n    options = blogAttributes()\n    entryForm = EntryForm()\n    options['form'] = entryForm\n    options['tags'] = Tag.objects.all()\n    if request.POST and request.method == 'POST':\n        entryForm = EntryForm(request.POST)\n        if entryForm.is_unique(request):\n            entry = entryForm.customSave(request.user)\n            # loop through the tags\n            if len(entry.tags) > 0:\n                tags = entry.tags.split(',')\n                for tag in tags:\n                    # if the tag doesn't exist\n                    if not Tag.objects.filter(name=tag).exists():\n                        # save the tag\n                        t = Tag()\n                        t.name = tag\n                        t.save()\n            messages.add_message(request, messages.SUCCESS, 'The Entry has been saved')\n            return redirect(blog_view)\n        else:\n            messages.add_message(request, messages.ERROR, 'An Entry with this Title already exists')\n\n    return render_to_response('entryForm.html', options, context_instance=RequestContext(request))\n\n\ndef blog_entry_comment_added_view(request):\n    \"\"\"\n    Shows the entry page with a success alert after a comment has been added\n    :param request:\n    :return:\n    \"\"\"\n    messages.add_message(request, messages.SUCCESS, 'The Comment has been saved')\n    # pass the view and its kwargs to redirect; calling the view directly would return an HttpResponse\n    return redirect(blog_entry_view, eID=request.GET['c'])\n\n\ndef blog_login_view(request):\n    \"\"\"\n    provides a page so users can log in\n    :param request:\n    :return:\n    \"\"\"\n    options = blogAttributes()\n    if request.method == 'POST':\n        uname = request.POST.get('user', '')\n        psword = request.POST.get('pwd', '')\n        user = auth.authenticate(username=uname, password=psword)\n        # if the user logs in and is active\n        if user is not None and user.is_active:\n            auth.login(request, user)\n            messages.add_message(request, messages.SUCCESS, 'You have been logged in')\n            return redirect(blog_view)\n        else:\n            # user failed to login\n            messages.add_message(request, messages.ERROR, 'You entered your User Name or Password incorrectly')\n            return render_to_response('login.html', options, context_instance=RequestContext(request))\n    # default view for user to login\n    else:\n        return render_to_response('login.html', options, context_instance=RequestContext(request))\n\n\ndef blog_logout_view(request):\n    auth.logout(request)\n    messages.add_message(request, messages.SUCCESS, 'You have been logged out')\n    
return redirect(blog_view)\n\n\n@login_required\ndef blog_tags_view(request, tID=None):\n \"\"\"\n lets logged in superusers see all tags\n :param request:\n :return:\n \"\"\"\n options = blogAttributes()\n if tID is not None:\n tag = get_object_or_404(Tag, id=tID)\n options['form'] = TagForm(instance=tag)\n options['tag'] = tag\n else:\n options['form'] = TagForm()\n options['tags'] = Tag.objects.all()\n return render_to_response('tags.html', options, context_instance=RequestContext(request))\n\n@login_required\ndef blog_tag_delete_view(request, tID):\n \"\"\"\n lets logged in superusers add tags\n :param request:\n :return:\n \"\"\"\n tag = get_object_or_404(Tag, id=tID)\n tag.delete()\n messages.add_message(request, messages.SUCCESS, 'The Tag has been deleted')\n return redirect(blog_tags_view)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"405424021","text":"import unittest\n\nfrom nose.plugins.attrib import attr\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\nfrom threefour.plugin import ServerPlugin\n\n\nclass TestSettings(unittest.TestCase):\n def setUp(self):\n self._old_database_settings = settings.DATABASES\n\n def tearDown(self):\n settings.DATABASES = self._old_database_settings\n\n def test_ok_with_sqlite(self):\n settings.DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'default.db',\n },\n 'secondary': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'secondary.db',\n }\n }\n\n sp = ServerPlugin()\n\n self.assertTrue(sp.check_database_multithread_compilant())\n\n def test_checks_in_memory_db(self):\n settings.DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': ':memory:',\n }\n }\n\n sp = ServerPlugin()\n\n self.assertRaises(ImproperlyConfigured, sp.check_database_multithread_compilant)\n\n def test_checks_in_memory_db_test_name(self):\n settings.DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'default.db',\n 'TEST_NAME': ':memory:',\n }\n }\n\n sp = ServerPlugin()\n\n self.assertRaises(ImproperlyConfigured, sp.check_database_multithread_compilant)\n","sub_path":"src/threefour/tests/test_settings.py","file_name":"test_settings.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"20041906","text":"#!/usr/bin/env python3\nimport cgi\nimport cgitb\nimport glob\nimport json\nfrom os import listdir,path\nfrom os.path import isfile, join\nimport pathlib\n\n\nprint(\"Content-Type: text/json\") \nprint()\n\ncgitb.enable()\nform = cgi.FieldStorage()\n\npostPath = form.getvalue('path')\npostData = form.getvalue('data')\n\n\nfileRoot = './'\n\nresult = {'success':'0'}\n\nif( postPath and postData ):\n\t\n\tif( postPath.endswith('.js') ):\n\t\tdir = fileRoot + (postPath).replace( '..', '')\n\t\ttry:\n\t\t\twith open(dir, 'w+') as f:\n\t\t\t\tf.write(postData)\n\t\texcept Exception as e:\n\t\t\tresult = {\n\t\t\t\t'error' : '2',\n\t\t\t\t'msg' : 'Couldn\\'t write to file: '+postPath+ str(e)\n\t\t\t}\n\telse:\n\t\tresult = {\n\t\t\t'error' : '3',\n\t\t\t'msg' : 'File must have a .js suffix'\n\t\t}\n\nelse:\n\tresult = {\n\t\t'error' : '1',\n\t\t'msg' : 'No Data or Path specified'\n\t}\n\n\nprint(json.dumps(result))\n''' \ndef save(self):\n resp = {'error': 0}\n if 'path' in self.post_params and 'data' 
in self.post_params:\n path = self.post_params['path'][0]\n path = os.path.join(BASE_DIR, path.replace('..', ''))\n data = self.post_params['data'][0]\n\n if path.endswith('.js'):\n try:\n open(path, 'w').write(data)\n except:\n resp['error'] = 2\n resp['msg'] = 'Couldn\\'t write to file %d' % path\n\n else:\n resp['error'] = 3\n resp['msg'] = 'File must have a .js suffix'\n\n else:\n resp['error'] = 1\n resp['msg'] = 'No Data or Path specified'\n\n return self.send_json(resp)\n\n'''","sub_path":"cgi-bin/save.py","file_name":"save.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"371976895","text":"#\n# # from openpyxl import Workbook\n# # wb=Workbook()\n# # sheet=wb.active\n# # sheet[\"A1\"]=\"username\"\n# # sheet[\"b1\"]=\"password\"\n# # sheet[\"a2\"]=\"tester1\"\n# # sheet[\"b2\"]=\"1234\"\n# # sheet[\"a3\"]=\"tester2\"\n# # sheet[\"b3\"]=\"45678\"\n# # wb.save(filename=\"login.xlsx\")\n# from openpyxl import load_workbook\n# wb=load_workbook(filename=\"login.xlsx\")\n# sheet=wb.active\n# print(sheet)\n# print(sheet[\"a1\"].value)\n# print(sheet[\"a2\"].value)\n# print(sheet[\"a3\"].value)\n# print(sheet[\"b1\"].value)\n# print(sheet[\"b2\"].value)\n# print(sheet[\"b3\"].value)\n# # for value in sheet.iter_rows(min_row=1,\n# # max_row=3,\n# # min_col=1,\n# # max_col=2,values_only=True):\n# # print(value)\n# for value in sheet.iter_cols(min_row=1,\n# max_row=3,\n# min_col=1,\n# max_col=2,values_only=True):\n# print(value)\n# import\nfrom openpyxl import Workbook\nwb=Workbook()\nsheet=wb.active\nsheet[\"b1\"]=\"test1\"\nsheet[\"c1\"]=\"test2\"\nsheet[\"d1\"]=\"test3\"\nsheet[\"e1\"]=\"test4\"\nsheet[\"f1\"]=\"test5\"\nsheet[\"g1\"]=\"test6\"\nsheet[\"h1\"]=\"test7\"\nsheet[\"a2\"]=\"category\"\nsheet[\"a3\"]=\"product id\"\nsheet[\"a4\"]=\"quantity\"\nsheet[\"a5\"]=\"color\"\nsheet[\"b2\"]=\"speakers\"\nsheet[\"b3\"]=\"19\"\nsheet[\"b4\"]=\"2\"\nsheet[\"b5\"]=\"red\"\nsheet[\"c2\"]=\"headphones\"\nsheet[\"c3\"]=\"12\"\nsheet[\"c4\"]=\"2\"\nsheet[\"c5\"]=\"purple\"\nsheet[\"d2\"]=\"mice\"\nsheet[\"d3\"]=\"28\"\nsheet[\"d4\"]=\"\"\nsheet[\"d5\"]=\"green\"\nsheet[\"e2\"]=\"mice\"\nsheet[\"e3\"]=\"30\"\nsheet[\"e4\"]=\"1\"\nsheet[\"e5\"]=\"\"\nsheet[\"f2\"]=\"laptops\"\nsheet[\"f3\"]=\"9\"\nsheet[\"f4\"]=\"2\"\nsheet[\"f5\"]=\"blue\"\nsheet[\"g2\"]=\"laptops\"\nsheet[\"g3\"]=\"7\"\nsheet[\"g4\"]=\"3\"\nsheet[\"g5\"]=\"\"\nsheet[\"h2\"]=\"tablets\"\nsheet[\"h3\"]=\"18\"\nsheet[\"h4\"]=\"\"\nsheet[\"h5\"]=\"yellow\"\nwb.save(filename=\"testdata.xlsx\")\nfrom openpyxl import load_workbook\nwb=load_workbook(filename=\"testdata.xlsx\")\nsheet=wb.active\ntests = {}\ntest_arr = {}\nfor col in sheet.iter_cols(min_row=1,\n max_row=5,\n min_col=2,\n max_col=7,values_only=True):\n\n test_name = col[0]\n tests = {\n \"category\":col[1],\n \"product_id\":col[2],\n \"quantity\":col[3],\n \"color\":col[4]\n }\n test_arr[test_name] = tests\nprint(test_arr)\n# import json\n# json=json.dumps(value)\n# print(json)","sub_path":"venv/timemodules.py","file_name":"timemodules.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"294087223","text":"def backSolve(A,b):\n '''\n :param A: The upper triangular matrix to be back substituted\n :param b: The b vector in the system of equations\n :return: The solution vector x.\n '''\n numRows, numColumns = len(A), len(A)\n xVec = [0] * numRows\n for row in range(numRows - 1, -1, 
-1):\n sum = b[row]\n for column in range(row +1, numColumns):\n sum -= A[row][column] * xVec[column]\n xVec[row] = sum / A[row][row]\n return xVec\n\n\nif __name__ == '__main__':\n testMatrix = [\n [1,2,3],\n [0,1,2],\n [0,0,10]\n ]\n testB = [6,4,30]\n solution = backSolve(testMatrix,testB)\n print(solution)\n\n\n","sub_path":"lib/BackSolve.py","file_name":"BackSolve.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"587704848","text":"# coding=utf-8\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.cart_item, name='cart_item'),\n url(r'^adicionar/(?P[\\w_-]+)/$', views.create_cartitem,name='create_cartitem'),\n url(r'^finalizar/$', views.checkout, name='checkout'),\n\n]\n","sub_path":"checkout/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159337065","text":"## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.\n## Use of this source code is governed by a BSD-style license that can be\n## found in the COPYING file.\n\nimport pytest\n\nimport qibuild.config\n\ndef test_show(qibuild_action):\n # Just check it does not crash for now\n qibuild_action(\"config\")\n\ndef test_run_wizard(qibuild_action, interact):\n interact.answers = {\n \"generator\" : \"Unix Makefiles\",\n \"ide\" : \"None\",\n }\n\n qibuild_action(\"config\", \"--wizard\")\n","sub_path":"python/qibuild/test/test_qibuild_config.py","file_name":"test_qibuild_config.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"279417436","text":"import os\n\n__author__ = 'delur'\nimport cPickle as pickle\n\ndef write_picklefile(pickle_object, filename, paths):\n path = os.path.join(paths[\"working_dir\"] , \"pickles\")\n if not os.path.exists(path):\n os.makedirs(path)\n\n\n output = open(os.path.join(path, filename + \".pkl\"), 'wb')\n pickle.dump(pickle_object, output)\n output.close()","sub_path":"masterthesis/writer/pickle_file.py","file_name":"pickle_file.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"184566876","text":"import numpy as np\nimport numpy \nimport cvxopt\nimport cvxopt.solvers\nimport time, threading\nimport matplotlib \nimport weakref\nimport random\n\n\nlimit = 0\n\ndef kernel(x1, x2):\n linear = np.dot(x1, x2)\n return linear\n\nclass SupportVectorMachine(object):\n def __init__(self, kernel=kernel, C=None):\n self.kernel = kernel\n self.C = C\n if self.C is not None: \n self.C = float(self.C)\n\n def compute(self, X, y):\n n_samples, n_features = X.shape\n K = np.zeros((n_samples, n_samples))\n for i in range(n_samples):\n for j in range(n_samples):\n K[i,j] = self.kernel(X[i], X[j])\n\n P = cvxopt.matrix(np.outer(y,y) * K)\n q = cvxopt.matrix(np.ones(n_samples) * -1)\n A = cvxopt.matrix(y, (1,n_samples))\n b = cvxopt.matrix(0.0)\n\n if self.C is None:\n G = cvxopt.matrix(np.diag(np.ones(n_samples) * -1))\n h = cvxopt.matrix(np.zeros(n_samples))\n else:\n tmp1 = np.diag(np.ones(n_samples) * -1)\n tmp2 = np.identity(n_samples)\n G = cvxopt.matrix(np.vstack((tmp1, tmp2)))\n tmp1 = np.zeros(n_samples)\n tmp2 = np.ones(n_samples) * self.C\n h = cvxopt.matrix(np.hstack((tmp1, tmp2)))\n solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n a = 
np.ravel(solution['x'])\n        sv = a > 1e-5\n        ind = np.arange(len(a))[sv]\n        self.a = a[sv]\n        self.sv = X[sv]\n        self.sv_y = y[sv]\n        self.b = 0\n        for n in range(len(self.a)):\n            self.b += self.sv_y[n]\n            self.b -= np.sum(self.a * self.sv_y * K[ind[n],sv])\n        self.b /= len(self.a)\n\n        if self.kernel == kernel:\n            self.w = np.zeros(n_features)\n            for n in range(len(self.a)):\n                self.w += self.a[n] * self.sv_y[n] * self.sv[n]\n        else:\n            self.w = None\n\n    def supportVector(self, X):\n        if self.w is not None:\n            return np.dot(X, self.w) + self.b\n        else:\n            y_predict = np.zeros(len(X))\n            for i in range(len(X)):\n                s = 0\n                for a, sv_y, sv in zip(self.a, self.sv_y, self.sv):\n                    s += a * sv_y * self.kernel(X[i], sv)\n                y_predict[i] = s\n            return y_predict + self.b\n\n    def predict(self, X):\n        return np.sign(self.supportVector(X))\n\n\nif __name__ == \"__main__\":\n    import pylab \n    \n\n    # Produce random data-set\n    def randomizeData():\n        rand1 = random.randrange(0, 5) # essentially data grid size\n        rand2 = random.randrange(0, 5)\n        rand3 = random.randrange(0, 5)\n        rand4 = random.randrange(0, 5)\n        firstMean = np.array([rand1, rand2])\n        secondMean = np.array([rand3, rand4])\n\n        covariance = np.array([[1.5, 1.0], [1.0, 1.5]])\n\n        # Gaussian distribution \n        X1 = np.random.multivariate_normal(firstMean, covariance, 100)\n\n        Y1 = np.ones(len(X1)) # labels of +1 for the first class\n\n        X2 = np.random.multivariate_normal(secondMean, covariance, 100)\n        \n        Y2 = np.ones(len(X2)) * -1 # labels of -1 for the second class\n        return X1, Y1, X2, Y2\n\n    def train(X1, Y1, X2, Y2):\n        learnX1 = X1[:90]\n        learnY1 = Y1[:90]\n        learnX2 = X2[:90]\n        learnY2 = Y2[:90]\n        learnX = np.vstack((learnX1, learnX2)) # stack samples vertically (rows are samples)\n        learnY = np.hstack((learnY1, learnY2)) # concatenate the label vectors\n        return learnX, learnY\n\n    def data(X1, Y1, X2, Y2):\n        X1_test = X1[90:]\n        y1_test = Y1[90:]\n        X2_test = X2[90:]\n        y2_test = Y2[90:]\n        X_test = np.vstack((X1_test, X2_test))\n        y_test = np.hstack((y1_test, y2_test))\n        return X_test, y_test\n\n    def plot_margin(learnX1, learnX2, clf):\n        def f(x, w, b, c=0):\n            return (-w[0] * x - b + c) / w[1]\n\n        pylab.plot(learnX1[:,0], learnX1[:,1], \"ro\")\n        pylab.plot(learnX2[:,0], learnX2[:,1], \"bo\")\n        pylab.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c=\"g\")\n\n        a0 = -2; a1 = f(a0, clf.w, clf.b)\n        b0 = 2; b1 = f(b0, clf.w, clf.b)\n        pylab.plot([a0,b0], [a1,b1], \"k\")\n\n        a0 = -2; a1 = f(a0, clf.w, clf.b, 1)\n        b0 = 2; b1 = f(b0, clf.w, clf.b, 1)\n        pylab.plot([a0,b0], [a1,b1], \"k--\")\n\n        a0 = -2; a1 = f(a0, clf.w, clf.b, -1)\n        b0 = 2; b1 = f(b0, clf.w, clf.b, -1)\n        pylab.plot([a0,b0], [a1,b1], \"k--\")\n\n        pylab.axis(\"tight\")\n        pylab.show()\n\n    def createPlot(learnX1, learnX2, clf, percent):\n        pylab.clf()\n        pylab.cla()\n        pylab.plot(learnX1[:,0], learnX1[:,1], \"ro\")\n        pylab.plot(learnX2[:,0], learnX2[:,1], \"bo\")\n        pylab.scatter(clf.sv[:,0], clf.sv[:,1], s=100, c=\"y\")\n\n        X1, X2 = np.meshgrid(np.linspace(-10,10,10), np.linspace(-10,10,10))\n        X = np.array([[x1, x2] for x1, x2 in zip(np.ravel(X1), np.ravel(X2))])\n        Z = clf.supportVector(X).reshape(X1.shape)\n        pylab.contour(X1, X2, Z, [0.0], colors='black', linewidths=1.5, origin='lower')\n        pylab.contour(X1, X2, Z + 1, [0.0], colors='blue', linewidths=2, origin='lower')\n        pylab.contour(X1, X2, Z - 1, [0.0], colors='red', linewidths=2, origin='lower')\n        sign = \"%\"\n        pylab.title(\"Percentage of correct predictions %d %%\" % (percent))\n        pylab.axis(\"tight\")\n\n        pylab.show()\n    \n\n    \n    def createData():\n        X1, Y1, X2, Y2 = 
randomizeData()\n learnX, learnY = train(X1, Y1, X2, Y2)\n X_test, y_test = data(X1, Y1, X2, Y2)\n clf = SupportVectorMachine(C=0.1)\n clf.compute(learnX, learnY)\n y_predict = clf.predict(X_test)\n correct = np.sum(y_predict == y_test)\n correctNum = float(correct) \n predict = float(len(y_predict)) \n percent = (correctNum/predict)*100\n createPlot(learnX[learnY==1], learnX[learnY==-1], clf , percent)\n\n def generator():\n try:\n global limit \n limit = limit - 1\n now = time.time()\n threading.Timer(3, generator).start()\n later = now + 10\n #if (limit > 0):\n createData() #indent and remove comments for limit c\n #os._exit()\n except RuntimeError:\n pass \n #limit = input(\"Please enter maximum number of graphs to generate: \")\n #limit = int(limit)+1\n generator()\n\n","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":6448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632450690","text":"print(\"Hello, World!\")\r\n\r\nx = 1\r\ny = 2.3\r\nz = 3j\r\n\r\nprint(type(x))\r\nprint(type(y))\r\nprint(type(z))\r\n\r\nprint('Hello')\r\nprint(\"Hello\")\r\n\r\na = \"Hello, World!\"\r\n\r\nprint(a)\r\nprint(len(a))\r\nprint(a[3:len(a)])\r\n\r\nb = a.replace(\"o\",\"0\")\r\nprint(b)\r\nprint(a.split(\",\"))\r\n\r\ntxt = \"The rain in Spain stays mainly in the plain\"\r\nx = \"ain\" in txt\r\ny = \"ain\" not in txt\r\nprint(x) # True\r\nprint(y) # False\r\n\r\n\r\nage = 36\r\ntxt = \"My name is John, I am {}\"\r\nprint(txt.format(age))\r\n\r\n\r\nquantity = 3\r\nitemno = 567\r\nprice = 49.95\r\nmyorder = \"I want {} pieces of item {} for {} dollars.\"\r\nprint(myorder.format(quantity, itemno, price))\r\n\r\n# Output: I want 3 pieces of item 567 for 49.95 dollars.\r\n\r\nmyorder = \"I want {2} pieces of item {1} for {0} dollars.\"\r\nprint(myorder.format(price, itemno, quantity))\r\n\r\na = int(1) \t # a will be 1\r\nb = int(2.8)\t # b will be 2\r\nc = int(\"3\") \t # c will be 3\r\n\r\nx = float(1) # x will be 1.0\r\ny = float(2.8) # y will be 2.8\r\nz = float(\"3\") # z will be 3.0\r\nw = float(\"4.2\") # w will be 4.2\r\n\r\nj = str(\"s1\") \t # j will be 's1'\r\nk = str(2) \t # k will be '2'\r\nl = str(3.0) \t # l will be '3.0' \r\n\r\nfruits = \"apple is my favorite\"\r\nfor fruit in fruits:\r\n print(fruit) \r\n\r\nx = range(6)\r\nprint(x)\r\n\r\nfor y in x:\r\n print(y)\r\n\r\nname = \"Ali\"\r\n\r\nprint(name, 2)","sub_path":"python/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"193483213","text":"import torch\nfrom torch import nn\nimport torchvision\nfrom torchvision.models.mobilenet import mobilenet_v2\nimport json \nimport numpy as np\nimport bcolz \nimport pickle\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\n\ndef create_word_embedding():\n # using word2vec\n vectors = bcolz.open(f'./Glove/6B.300.dat')[:]\n words = pickle.load(open(f'./Glove/6B.300_words.pkl', 'rb'))\n word2idx = pickle.load(open(f'./Glove/6B.300_idx.pkl', 'rb'))\n\n glove = {w: vectors[word2idx[w]] for w in words}\n with open('./caption_data/WORDMAP_coco_5_cap_per_img_5_min_word_freq.json', 'r') as j:\n target_vocab = json.load(j)\n matrix_len = len(target_vocab)\n weights_matrix = np.zeros((matrix_len, 300))\n words_found = 0\n\n for i, word in enumerate(target_vocab):\n word = word.lower()\n try: \n weights_matrix[i] = glove[word]\n words_found += 1\n except KeyError:\n 
weights_matrix[i] = np.random.normal(scale=0.6, size=(300, ))\n\n emb_layer = nn.Embedding(len(target_vocab),300)\n weights_matrix = torch.tensor(weights_matrix)\n emb_layer.load_state_dict({'weight': weights_matrix})\n\n return emb_layer\n\n\nclass Encoder(nn.Module):\n\n def __init__(self, encoded_image_size=14):\n super(Encoder, self).__init__()\n self.enc_image_size = encoded_image_size\n \n\n mobilenet = torchvision.models.mobilenet_v2(pretrained=True)\n layers = list(mobilenet.children())[:-1]\n self.encoder_net = nn.Sequential(*layers,nn.Conv2d(1280, 2048, 1) ) #nn.BatchNorm2d(num_features=2048,eps=1e-05, momentum=0.1, affine=True, track_running_stats=True )\n\n #self.conv1 = nn.Conv2d(1280, 2048, 1)\n\n self.adaptive_pool = nn.AdaptiveAvgPool2d((encoded_image_size, encoded_image_size))\n\n self.fine_tune()\n\n def forward(self, images):\n out = self.encoder_net(images) \n #out = self.conv1(out) # (batch_size, 2048, image_size/32, image_size/32)\n out = self.adaptive_pool(out) # 2048 filters \n out = out.permute(0, 2, 3, 1) \n return out\n\n def fine_tune(self, fine_tune=True):\n\n for p in self.encoder_net.parameters():\n p.requires_grad = False\n # If fine-tuning, only fine-tune convolutional blocks 2 through 4\n for c in list(self.encoder_net.children())[5:]:\n for p in c.parameters():\n p.requires_grad = fine_tune\n\n\nclass Attention(nn.Module):\n def __init__(self, encoder_dim, decoder_dim, attention_dim):\n super(Attention, self).__init__()\n self.encoder_att = nn.Linear(encoder_dim, attention_dim) # linear layer to transform encoded image\n self.decoder_att = nn.Linear(decoder_dim, attention_dim) # linear layer to transform decoder's output\n self.full_att = nn.Linear(attention_dim, 1) # linear layer to calculate values to be softmax-ed\n self.relu = nn.ReLU()\n self.softmax = nn.Softmax(dim=1) \n\n def forward(self, encoder_out, decoder_hidden):\n\n att1 = self.encoder_att(encoder_out) \n att2 = self.decoder_att(decoder_hidden) \n att = self.full_att(self.relu(att1 + att2.unsqueeze(1))).squeeze(2) \n alpha = self.softmax(att) \n attention_weighted_encoding = (encoder_out * alpha.unsqueeze(2)).sum(dim=1) \n\n return attention_weighted_encoding, alpha\n\n\n\n\nclass DecoderWithAttention(nn.Module):\n\n def __init__(self, attention_dim, embed_dim, decoder_dim, vocab_size, encoder_dim=2048, dropout=0.5):\n super(DecoderWithAttention, self).__init__()\n\n self.encoder_dim = encoder_dim\n self.attention_dim = attention_dim\n self.embed_dim = embed_dim\n self.decoder_dim = decoder_dim\n self.vocab_size = vocab_size\n self.dropout = dropout\n\n self.attention = Attention(encoder_dim, decoder_dim, attention_dim) \n\n self.embedding = create_word_embedding() \n self.dropout = nn.Dropout(p=self.dropout)\n self.decode_step = nn.LSTMCell(embed_dim + encoder_dim, decoder_dim, bias=True) \n self.init_h = nn.Linear(encoder_dim, decoder_dim) \n self.init_c = nn.Linear(encoder_dim, decoder_dim) \n self.f_beta = nn.Linear(decoder_dim, encoder_dim) \n self.sigmoid = nn.Sigmoid()\n self.fc = nn.Linear(decoder_dim, vocab_size) \n self.init_weights() \n\n def init_weights(self):\n\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)\n\n def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)\n\n def fine_tune_embeddings(self, fine_tune=True):\n for p in self.embedding.parameters():\n p.requires_grad = fine_tune\n\n def init_hidden_state(self, encoder_out):\n mean_encoder_out = encoder_out.mean(dim=1)\n h = 
self.init_h(mean_encoder_out) # (batch_size, decoder_dim)\n c = self.init_c(mean_encoder_out)\n return h, c\n\n def forward(self, encoder_out, encoded_captions, caption_lengths):\n\n batch_size = encoder_out.size(0)\n encoder_dim = encoder_out.size(-1)\n vocab_size = self.vocab_size\n\n \n encoder_out = encoder_out.view(batch_size, -1, encoder_dim) \n num_pixels = encoder_out.size(1)\n\n caption_lengths, sort_ind = caption_lengths.squeeze(1).sort(dim=0, descending=True)\n encoder_out = encoder_out[sort_ind]\n encoded_captions = encoded_captions[sort_ind]\n\n embeddings = self.embedding(encoded_captions) \n\n # Initialize LSTM state\n h, c = self.init_hidden_state(encoder_out) \n\n \n decode_lengths = (caption_lengths - 1).tolist()\n\n \n predictions = torch.zeros(batch_size, max(decode_lengths), vocab_size).to(device)\n alphas = torch.zeros(batch_size, max(decode_lengths), num_pixels).to(device)\n\n \n for t in range(max(decode_lengths)):\n batch_size_t = sum([l > t for l in decode_lengths])\n attention_weighted_encoding, alpha = self.attention(encoder_out[:batch_size_t],\n h[:batch_size_t])\n gate = self.sigmoid(self.f_beta(h[:batch_size_t])) \n attention_weighted_encoding = gate * attention_weighted_encoding\n h, c = self.decode_step(\n torch.cat([embeddings[:batch_size_t, t, :], attention_weighted_encoding], dim=1),\n (h[:batch_size_t], c[:batch_size_t])) \n preds = self.fc(self.dropout(h)) \n predictions[:batch_size_t, t, :] = preds\n alphas[:batch_size_t, t, :] = alpha\n\n return predictions, encoded_captions, decode_lengths, alphas, sort_ind\n","sub_path":"Model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"429450771","text":"# Markov Logic Networks -- WCSP conversion\n#\n# (C) 2012 by Daniel Nyga (nyga@cs.tum.edu)\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport copy\nfrom collections import Iterable\n\nclass VarSet(object):\n '''\n Represents a set of positive and negative (binary) random variables\n '''\n \n def __init__(self, posVars=None, negVars=None):\n '''\n posVars and negVars being sets\n '''\n if posVars is None:\n posVars = frozenset()\n else:\n posVars = frozenset(posVars)\n if negVars is None:\n negVars = frozenset()\n else:\n negVars = frozenset(negVars)\n self.__vars = (posVars, negVars)\n \n def isDisjoint(self, other):\n return len(self.pos().intersection(other.pos())) == 0 and 
len(self.neg().intersection(other.pos())) == 0\n \n def pos(self):\n return self.__vars[0]\n \n def neg(self):\n return self.__vars[1]\n\n def remove(self, other):\n self.__vars = (self.pos() - other.pos(), self.neg() - other.neg())\n \n def isEmpty(self):\n return len(self.pos()) == 0\n \n def union(self, varset):\n return (self.pos().union(varset.pos()), self.neg().union(varset.neg()))\n \n def __hash__(self):\n return self.__vars.__hash__()\n \n def __eq__(self, other):\n return self.pos() == other.pos() and self.neg() == other.neg()\n \n def __len__(self):\n return len(self.pos()) + len(self.neg())\n \n def __str__(self):\n pos = list(self.pos())\n neg = list(self.neg())\n retStr = ';'.join([str(x) for x in sorted(pos)])\n if len(self.neg()) > 0:\n retStr += ';$'\n return retStr + ';$'.join([str(x) for x in sorted(neg)])\n\nclass Node(object):\n '''\n Represents a node in a graph\n '''\n \n def __init__(self, node_id, data=None):\n self.id = node_id\n self.data = data\n self.children = set()\n self.parents = set()\n \n def addChild(self, child):\n '''\n Adds a child to the node.\n '''\n self.children.add(child)\n child.parents.add(self)\n \n def addChildren(self, children):\n for c in children:\n self.addChild(c)\n \n def addParent(self, parent):\n '''\n Adds a parent to the node.\n '''\n self.parents.add(parent)\n parent.children.add(self)\n \n def addParents(self, parents):\n for p in parents:\n self.addParent(p)\n \n def removeChild(self, child):\n child.parents.remove(self)\n self.children.remove(child)\n \n def siblings(self, excludeMyself=True):\n '''\n Returns all siblings (children of parents) of this node, excluding the node itself (or not).\n '''\n sibl = set()\n for p in self.parents:\n sibl.update(p.children)\n if self in sibl and excludeMyself is True:\n sibl.remove(self)\n return sibl\n\n def stepParents(self):\n '''\n Returns the set of all stepparents (i.e. the set of all parents of all siblings\n that are not parents of this node.\n '''\n stepParents = set()\n for s in self.siblings():\n stepParents.update(s.parents)\n return stepParents.difference(self.parents)\n \n def stepChildren(self):\n stepChildren = set()\n cParents = set()\n for c in self.children:\n cParents.update(c.parents.difference([self]))\n for cp in cParents:\n stepChildren.update(cp.children.difference(self.children))\n return stepChildren\n \n def __str__(self):\n return '{}'.format(str(self.id))\n \n def __eq__(self, other):\n return self.id == other.id\n\n \n \nclass DAG(object):\n '''\n Represents a (rooted) directed acyclic graph (DAG)\n '''\n \n def __init__(self, root):\n self.root = root\n \n def traverse(self, algo='DFS', direction='down', root=None):\n '''\n Iterates over the graph nodes according to the given strategy. \n Currently, depth-first search (DFS) and breadth first search \n (BFS) are supported. Caution: Since children are sets, this \n is nondeterministic! 
If an id is given (!=None), then the algorithm\n stops at the first node, the id field of which is equal to id.\n '''\n if root is None:\n root = self.root\n queue = [root]\n processed = set()\n processed.add(root)\n while len(queue) > 0:\n n = queue.pop()\n yield n\n if algo == 'BFS':\n queue = self.bfsEnqueue(n, queue, processed, direction)\n elif algo == 'DFS':\n queue = self.dfsEnqueue(n, queue, processed, direction)\n \n def dfsEnqueue(self, node, queue, processed, direction):\n if direction == 'up':\n nodes = sorted([x for x in node.parents if x not in processed])\n elif direction == 'down':\n nodes = sorted([x for x in node.children if x not in processed])\n processed.update(nodes)\n return queue + nodes\n\n def bfsEnqueue(self, node, queue, processed, direction):\n if direction == 'up':\n nodes = sorted([x for x in node.parents if x not in processed])\n elif direction == 'down':\n nodes = sorted([x for x in node.children if x not in processed])\n processed.update(nodes)\n return nodes + queue\n \n def getLeafNodes(self):\n leaves = set()\n for n in self.traverse():\n if len(n.children) == 0:\n leaves.add(n)\n return leaves\n \n def getNextNode(self):\n multipleInheritanceNodes = {}\n transClosureUp = set()\n for n in self.traverse():\n if len(n.parents) > 1:\n multipleInheritanceNodes[n] = self.transitiveClosure(n, 'down')\n multipleInheritanceNodes[n].remove(n)\n transClosure = self.transitiveClosure(n, 'up')\n transClosure.remove(n)\n transClosureUp.update(transClosure)\n for n in list(multipleInheritanceNodes.keys()):\n if not multipleInheritanceNodes[n].isdisjoint(list(multipleInheritanceNodes.keys())) or not transClosureUp.isdisjoint(n.siblings()):\n continue\n else:\n return n\n return None\n \n \n def ancestorSubgraph(self, nodes):\n '''\n Returns the subgraph from the given node to the root node(s).\n The result is a new instance of a graph and nodes.\n '''\n visited = set()\n newGraph = copy.deepcopy(self)\n if not isinstance(nodes, Iterable):\n nodes = [nodes]\n for n in nodes:\n visited.update(newGraph.transitiveClosure(n, direction='up'))\n for n in newGraph.traverse(algo='DFS', method='graph', direction='down'):\n if n in visited:\n n.children = visited.intersection(n.children)\n return newGraph\n \n def transitiveClosure(self, node, direction='down'):\n '''\n Returns the transitive closure for the given node\n '''\n for start in self.traverse(id = node.id): pass\n closure = set()\n for n in self.traverse(root=start, direction=direction):\n closure.update([n])\n return closure\n \n def findNodeByID(self, node_id):\n for n in self.traverse(id=node_id,algo='DFS'): pass\n return n\n \ndef processNode(dag, node):\n if len(node.parents) == 0:\n return None\n if len(node.parents) == 1:\n for p in node.parents: pass\n return p\n \n siblings = node.siblings(False)\n # determine all nodes that need to be processed\n nodesToMerge = set()\n for s in siblings:\n nodesToMerge.update(s.parents)\n# nodesToMerge.intersection_update(transHulls[node])\n # determine all transitive closures\n \n sndSiblings = set()\n for n in nodesToMerge:\n sndSiblings.update(n.children.difference(nodesToMerge))\n transHulls = {}\n for s in sndSiblings:\n transHulls[s] = dag.transitiveClosure(s, 'up')\n \n grandParents = set()\n for n in nodesToMerge:\n grandParents.update(n.parents.difference(nodesToMerge))\n grandParentsChildren = {}\n for gp in grandParents:\n grandParentsChildren[gp] = copy.copy(gp.children)\n \n # create the new node by merging all parents\n# parNodeIdUnion = set()\n# for p in 
parents:\n# parNodeIdUnion.update(p.id.pos())\n# newNode = Node(VarSet(parNodeIdUnion)) \n# node.parents = set()\n# newNode.addChild(node)\n# newNodes = {newNode.id: newNode}\n newNodes = {}\n \n # create corresponding nodes for all siblings\n for sibl in sndSiblings:\n siblParents = transHulls[sibl]\n parentsDiff = nodesToMerge.difference(siblParents)\n parentsIntersect = nodesToMerge.intersection(siblParents)\n posClasses = set()\n for p in parentsIntersect:\n posClasses.update(p.id.pos())\n negClasses = set()\n for p in parentsDiff:\n negClasses.update(p.id.pos())\n negClasses.difference_update(posClasses)\n nodeId = VarSet([str(x) for x in posClasses], [str(x) for x in negClasses])\n newNode = newNodes.get(nodeId, None)\n if newNode is None:\n newNode = Node(nodeId)\n newNodes[nodeId] = newNode\n newNode.addChild(sibl)\n sibl.parents.difference_update(parentsIntersect)\n \n# for newNode in newNodes.values():\n# positives = newNode.id.pos()\n# for n in nodesToMerge:\n\n for gp in grandParents:\n for sp in list(newNodes.values()):\n children = set()\n for gpc in grandParentsChildren[gp]:\n if not sp.id.pos().isdisjoint(gpc.id.pos()):\n children.add(sp)\n gp.addChildren(children)\n gp.children.difference_update(nodesToMerge)\n \n # eliminate transitive edges\n for nn in list(newNodes.values()):\n newParents = set()\n nnParents = nn.parents\n for par in nnParents:\n parents_ = nnParents.difference(set([par]))\n queue = list(parents_)\n notTransitive = True\n while len(queue) > 0:\n q = queue.pop()\n if q == par:\n notTransitive = False\n break\n queue.extend(q.parents)\n if notTransitive == True:\n newParents.add(par)\n else:\n par.children.remove(nn)\n nn.parents = newParents\n return newNode\n\nif __name__ == '__main__':\n pass\n# a = Node(VarSet('A'))\n# b = Node(VarSet('B'))\n# c = Node(VarSet('C'))\n# d = Node(VarSet('D'))\n# e = Node(VarSet('E'))\n# f = Node(VarSet('F'))\n# g = Node(VarSet('G'))\n## h = Node(VarSet('H'))\n## t = Node(VarSet('T'))\n## r = Node('R')\n## z = Node('Z')\n# a.addChildren([b,d])\n# b.addChildren([c])\n# d.addChildren([e])\n# f.addParents([c,e])\n# c.addChild(g)\n# a.addChildren([b,c])\n# b.addChildren([f,e])\n# c.addChild(d)\n# d.addChildren([e])\n# s.addParents([d])\n# s.addChildren([e])\n# d.addChild(r)\n\n# t = Node(VarSet(['T']))\n# f = Node(VarSet(['F']))\n# g = Node(VarSet(['G']))\n# d = Node(VarSet(['D']))\n# s = Node(VarSet(['S']))\n# e = Node(VarSet(['E']))\n# r = Node(VarSet(['R']))\n# a = Node(VarSet(['A']))\n# b = Node(VarSet(['B']))\n# \n# t.addChildren([f, s, g])\n# f.addChildren([a,d])\n# g.addChildren([e,b,r])\n# d.addChildren([b])\n# s.addChildren([a,b])\n# e.addChildren([a])\n#\n# dag = DAG(t)\n# \n# nextNode = dag.getNextNode()\n# while nextNode != None:\n# for n in dag.traverse():\n# print n, '(children:',\n# for c in n.children:\n# print c,\n# print ')'\n# print \n#\n# print 'processing', nextNode \n# processNode(dag, nextNode)\n# nextNode = dag.getNextNode()\n# \n# for n in dag.traverse():\n# print n, '(children:',\n# for c in n.children:\n# print c,\n# print ')'\n# print ","sub_path":"python3/prac/pracutils/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":13265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"459881605","text":"from scgpm_lims import Connection\nimport subprocess\nfrom gbsc_utils import gbsc_utils\n\n#module load rundir/current\n\nconn = Connection()\n\ndef getRunInfo(run):\n\t\"\"\"\n\tFunction :\n\tArgs : run - A sequencing run 
name.\n\tReturns :\n\t\"\"\"\n\tri = conn.getruninfo(run)['run_info']\n\treturn ri\n\ndef getLaneInfo(run,lane):\n\t\"\"\"\n\tFunction : Returns the lane information from a lane on a sequencing run.\n\tArgs : run - A sequencing run name.\n\t\t\t\t\t\t lane - int. Lane number.\n\tReturns : dict.\n\t\"\"\"\n\tri = getRunInfo(run)\n\treturn ri['lanes'][str(lane)]\n\ndef isSequencingFailed(run):\n\tri = getRunInfo(run)\n\trun_status = ri[\"sequencing_run_status\"]\n\tif (run_status == \"sequencing_failed\") or (run_status == \"sequencing_exception\"):\n\t\treturn True\n\treturn False\n\ndef isSequencingDone(run):\n\tri = getRunInfo(run)\n\trun_status = ri[\"sequencing_run_status\"]\n\tif run_status == \"sequencing_done\":\n\t\treturn True\n\treturn False\n\ndef getPipelineRuns(run):\n\t\"\"\"\n\tFunction : Retrieves the finished and unfinished pipeline run IDs for the specified sequencing run from UHTS.\n\tArgs : run - str. The name of a sequencing run.\n\tReturns : A two item tuple where the 1st item is a list of finished pipeline run IDs, and the 2nd is a list of unfinished pipeline run IDs.\n\t\"\"\"\n\tri = getRunInfo(run)\n\n\tfinishedRuns = []\n\tnotFinishedRuns = []\n\tpruns = ri['pipeline_runs'] #pruns = pipelineRuns\n\tfor runId in pruns: \n\t\tprd = pruns[runId] #prd = pipelienRunData\n\t\tfinished = prd['finished']\n\t\tif finished:\n\t\t\tfinishedRuns.append(runId)\n\t\telse:\n\t\t\tnotFinishedRuns.append(runId)\n\treturn (finishedRuns,notFinishedRuns)\n\ndef getMaxPipelineRunId(run):\n\t\"\"\"\n\tFunction :\n\tArgs : run - str. Sequencing run name.\n\tReturns : int, or the None object if no pipeline runs.\n\t\"\"\"\n\tri = getRunInfo(run)\n\tpruns = ri['pipeline_runs'] #pruns = pipelineRuns\n\tif not pruns:\n\t\treturn None\n\tmaxPrunId = max([int(x) for x in pruns.keys()])\n\treturn maxPrunId\n\t\ndef setLatestPipelineRunToFinished(run):\n\t\"\"\"\n\tFunction : Of the pipeline runs of the specified run, sets the one with the largest ID (integer) to finished.\n\t If no pipeline runs, nothing happens.\n\tArgs : run - str. 
Sequencing run name.\n\tReturns : \n\t\"\"\"\n\tmaxPrunId = getMaxPipelineRunId(run)\n\tcmd = \"endrun.py --pipeline_id {maxPrunId} {run}\".format(maxPrunId=maxPrunId,run=run)\n\tgbsc_utils.createSubprocess(cmd)\n\n\ndef isArchivingDone(run):\n\tri = getRunInfo(run)\n\treturn ri['archiving_done']\n","sub_path":"uhts/uhts_utils.py","file_name":"uhts_utils.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"61759271","text":"from visual import *\nfrom visual.graph import *\n\npi=3.1415926535\nR=0.5 #radius of inductor\nmeu0=4*pi*10**-7\nN=5 #num of all turns\nNL=6 #num of inductors\nl=4 #lenth of coil in metres\nA=pi*R*R\nL=meu0*N*N*A/l\ndis=3 #distance from the magnetic\nr=0.001 # resistance of the coil\nBmag= 1#Bmag\nMag = cylinder(pos=(0,0,0),axis=vector(1,0,0),radius=1,color=color.red)\nMagS = cylinder(pos=(0,0,0),axis=vector(-1,0,0),radius=1,color=color.blue)\nfd=60\nomega=2*pi*fd\ndtheta=0.1\ndt=dtheta/omega\nt=0\n#curve\nscene1 = gdisplay(x=0, y=0, width=1000, height=400, xtitle='t', ytitle='v', background=(0.2, 0.6, 0.2))\nv_t = gcurve(color=color.red, gdisplay = scene1)\ni_0 = gcurve(color=color.red, gdisplay = scene1)\ni_1 = gcurve(color=color.blue, gdisplay = scene1)\ni_2 = gcurve(color=color.green, gdisplay = scene1)\n\n#inductors\nfor i in range(NL):\n inductors = cylinder(pos=vector(dis*cos(2*pi*i/NL),dis*sin(2*pi*i/NL),0),axis=(l*cos(2*pi*i/NL),l*sin(2*pi*i/NL),0),radius=R)\n\n#find B of small megnet\ndef B(r,Baxis):\n B=meu0/(4*pi*abs(r)**3)*(3*Baxis.dot(r.norm())*r.norm()-Baxis)\n # consider Baxis as the Magnetic moment because they have same direction \n return B\n\n#find mag flux\ndef flux(posL,Baxis,Laxis):\n flux=0\n d=abs(posL-Baxis)\n flux=B(posL,Baxis).dot(A*Laxis.norm())\n return (flux)\n #flux=Bmag*abs(cos(diff_angle(Baxis,Laxis)))*A/d\n #return flux\n \ndef V(posL,Baxis,Laxis):\n V=0\n flux0=0\n flux1=0\n flux0=flux(posL,Baxis,Laxis)\n Baxis2=rotate(Baxis,angle=0.1,axis=(0,0,1))\n flux1=flux(posL,Baxis2,Laxis)\n V=(flux1-flux0)/dt\n return V\n\nwhile(true):\n rate(100)\n t+=dt\n Vt=0\n for i in range(NL):\n posL=vector(dis*cos(2*pi*i/NL),dis*sin(2*pi*i/NL),0)\n Laxis=vector(l*cos(2*pi*i/NL),l*sin(2*pi*i/NL),0)\n #if(i==0): ##find the current of coils\n # i_0.plot(pos=(t,(V(posL,Mag.axis,Laxis)-V(posL,MagS.axis,Laxis))/r))\n #if(i==1):\n # i_1.plot(pos=(t,(V(posL,Mag.axis,Laxis)-V(posL,MagS.axis,Laxis))/r))\n #if(i==2):\n # i_2.plot(pos=(t,(V(posL,Mag.axis,Laxis)-V(posL,MagS.axis,Laxis))/r))\n Vl=V(posL,Mag.axis,Laxis)\n V2=-V(posL,MagS.axis,Laxis)##emf cause by the other half megnet\n Vt+=Vl+V2\n v_t.plot(pos=(t,Vt))\n Baxis=Mag.axis\n Mag.axis = rotate(Baxis,angle=0.1,axis=(0,0,1))\n MagS.axis = -Mag.axis\n \n","sub_path":"final/inductance-yang.py","file_name":"inductance-yang.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"371719789","text":"import unittest\nfrom HTMLTestRunner import HTMLTestRunner\n\nsuite = unittest.TestSuite()\ntests = unittest.defaultTestLoader.discover(r\"G:\\PycharmProjects\\day17\",pattern=\"Test*.py\")\nsuite.addTests(tests)\nf = open(file=\"测试报告.html\",mode=\"w+\",encoding=\"utf-8\")\nrunner = HTMLTestRunner.HTMLTestRunner(\n stream= f ,\n title=\"计算器测试报告\",\n verbosity=1,\n 
description=\"执行了加减乘除用例测试\"\n)\n\nrunner.run(suite)\n","sub_path":"day17/测试入口.py","file_name":"测试入口.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"588393517","text":"#!/usr/bin/env python3\nimport RPi.GPIO as GPIO\nimport time\nimport os, requests\n\nclass PullUpCounter:\n\n def __init__(self):\n #GPIO Mode (BOARD / BCM)\n GPIO.setmode(GPIO.BCM)\n\n #set GPIO Pins\n self.GPIO_TRIGGER_HAND = 18\n self.GPIO_ECHO_HAND = 24\n\n self.GPIO_TRIGGER_FACE = 22\n self.GPIO_ECHO_FACE = 27\n\n self.state = 'IDLE'\n\n self.startDistanceThreshold = 30\n self.faceThreshold = 30\n\n self.timeout = 5\n self.faceCount = 0\n\n self.time = time.time()\n\n self.server_url = 'https://pullomatic.herokuapp.com/'\n\n #set GPIO direction (IN / OUT)\n GPIO.setup(self.GPIO_TRIGGER_HAND, GPIO.OUT)\n GPIO.setup(self.GPIO_ECHO_HAND, GPIO.IN)\n\n GPIO.setup(self.GPIO_TRIGGER_FACE, GPIO.OUT)\n GPIO.setup(self.GPIO_ECHO_FACE, GPIO.IN)\n\n def distance(self,trigger):\n # set Trigger to HIGH\n if trigger == \"handTrigger\":\n GPIO_TRIGGER = self.GPIO_TRIGGER_HAND\n GPIO_ECHO = self.GPIO_ECHO_HAND\n elif trigger == \"faceTrigger\":\n GPIO_TRIGGER = self.GPIO_TRIGGER_FACE\n GPIO_ECHO = self.GPIO_ECHO_FACE\n\n GPIO.output(GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(GPIO_TRIGGER, False)\n\n StartTime = time.time()\n StopTime = time.time()\n\n # save StartTime\n while GPIO.input(GPIO_ECHO) == 0:\n StartTime = time.time()\n\n # save time of arrival\n while GPIO.input(GPIO_ECHO) == 1:\n StopTime = time.time()\n\n # time difference between start and arrival\n TimeElapsed = StopTime - StartTime\n # multiply with the sonic speed (34300 cm/s)\n # and divide by 2, because there and back\n distance = (TimeElapsed * 34300) / 2\n\n return distance\n\n def run(self):\n while True:\n faceDistance = self.distance('faceTrigger')\n handDistance = self.distance('handTrigger')\n if self.state == 'IDLE':\n\n # handDistance = self.distance('handTrigger')\n if handDistance < self.startDistanceThreshold:\n requests.post(self.server_url + 'session_start')\n self.time = time.time()\n print('start in session')\n self.state = 'IN_SESSION'\n\n elif self.state == 'IN_SESSION':\n print('in session')\n if time.time() - self.time > self.timeout:\n print('timed out')\n files = {'avatar': open('image.jpg', 'rb')}\n requests.post(self.server_url + 'session_stop', files=files)\n self.time = time.time()\n self.state = 'IDLE'\n\n # faceDistance = self.distance('faceTrigger')\n # handDistance = self.distance('handTrigger')\n if faceDistance < self.faceThreshold:\n self.faceCount += 1\n if self.faceCount > 2:\n self.faceCount = 0\n os.system('raspistill -vf -hf -o image.jpg --nopreview --exposure sports --drc high --timeout 1')\n print('entering pullup')\n self.state = 'IN_PULLUP'\n\n elif self.state == 'IN_PULLUP':\n print('in pullup')\n self.time = time.time()\n # faceDistance = self.distance('faceTrigger')\n if faceDistance > self.faceThreshold:\n requests.post(self.server_url + 'do_pullup')\n self.state = 'IN_SESSION'\n\nif __name__ == '__main__':\n try:\n pullup = PullUpCounter()\n pullup.run()\n # Reset by pressing CTRL + C\n except KeyboardInterrupt:\n print(\"Measurement stopped by User\")\n GPIO.cleanup()\n","sub_path":"pullUp.py","file_name":"pullUp.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"650158075","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as Data\nimport random\nimport math\nimport os\nimport numpy as np\nfrom seq2seq.performancePlot import plotDistance, computeDistance\n\nclass Encoder(nn.Module):\n def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):\n super().__init__()\n\n self.input_dim = input_dim\n self.hid_dim = hid_dim\n self.n_layers = n_layers\n self.dropout = dropout\n emb_dim = input_dim\n\n self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout, bidirectional= True)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, src):\n # embedded = [sent len, batch size, emb dim]\n outputs, (hidden, cell) = self.rnn(src)\n\n # outputs = [sent len, batch size, hid dim * n directions]\n # hidden = [n layers * n directions, batch size, hid dim]\n # cell = [n layers * n directions, batch size, hid dim]\n\n # outputs are always from the top hidden layer\n\n return hidden, cell\n\n\nclass Decoder(nn.Module):\n def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):\n super().__init__()\n\n self.emb_dim = emb_dim\n self.hid_dim = hid_dim\n self.output_dim = output_dim\n self.n_layers = n_layers\n self.dropout = dropout\n\n self.embedding = nn.Embedding(num_embeddings=output_dim, embedding_dim= emb_dim)\n\n self.rnn = nn.LSTM(emb_dim, hid_dim, n_layers, dropout=dropout, bidirectional= True)\n\n self.out = nn.Linear(2*hid_dim, output_dim)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, input, hidden, cell):\n # input = [batch size]\n # hidden = [n layers * n directions, batch size, hid dim]\n # cell = [n layers * n directions, batch size, hid dim]\n\n # n directions in the decoder will both always be 1, therefore:\n # hidden = [n layers, batch size, hid dim]\n # context = [n layers, batch size, hid dim]\n\n input = input.unsqueeze(0)\n\n # input = [1, batch size]\n embedded = self.dropout(self.embedding(input))\n\n # embedded = [1, batch size, emb dim]\n output, (hidden, cell) = self.rnn(embedded, (hidden, cell))\n\n # output = [sent len, batch size, hid dim * n directions]\n # hidden = [n layers * n directions, batch size, hid dim]\n # cell = [n layers * n directions, batch size, hid dim]\n\n # sent len and n directions will always be 1 in the decoder, therefore:\n # output = [1, batch size, hid dim]\n # hidden = [n layers, batch size, hid dim]\n # cell = [n layers, batch size, hid dim]\n\n prediction = self.out(output.squeeze(0))\n\n # prediction = [batch size, output dim]\n\n return prediction, hidden, cell\n\n\nclass Seq2Seq(nn.Module):\n def __init__(self, encoder, decoder, device):\n super().__init__()\n\n self.encoder = encoder\n self.decoder = decoder\n self.device = device\n\n assert encoder.hid_dim == decoder.hid_dim, \"Hidden dimensions of encoder and decoder must be equal!\"\n assert encoder.n_layers == decoder.n_layers, \"Encoder and decoder must have equal number of layers!\"\n\n def forward(self, src, trg, teacher_forcing_ratio=0.1):\n # src = [sent len, batch size]\n # trg = [sent len, batch size]\n # teacher_forcing_ratio is probability to use teacher forcing\n # e.g. 
if teacher_forcing_ratio is 0.75 we use ground-truth inputs 75% of the time\n\n        batch_size = trg.shape[1]\n        max_len = trg.shape[0]\n        trg_vocab_size = self.decoder.output_dim\n\n        # tensor to store decoder outputs\n        outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)\n\n        # last hidden state of the encoder is used as the initial hidden state of the decoder\n        hidden, cell = self.encoder(src)\n\n        # first input to the decoder is the zero tokens\n        input = torch.zeros(batch_size,dtype=torch.long).to(self.device)\n\n        for t in range(0, max_len):\n\n            output, hidden, cell = self.decoder(input, hidden, cell)\n            outputs[t] = output\n            teacher_force = random.random() < teacher_forcing_ratio\n            top1 = output.max(1)[1]\n\n            input = (trg[t] if teacher_force else top1)\n\n        return outputs\n\n\ndef train(model, iterator, optimizer, criterion, clip):\n    model.train()\n\n    epoch_loss = 0\n    total = 0\n    correct = 0\n    for i, (src, trg) in enumerate(iterator):\n        src = torch.transpose(src, 0, 1)\n        trg = torch.transpose(trg, 0, 1)\n\n        optimizer.zero_grad()\n\n        output = model(src, trg)\n\n        # trg = [sent len, batch size]\n        # output = [sent len, batch size, output dim]\n\n        # reshape to:\n        # trg = [(sent len - 1) * batch size]\n        # output = [(sent len - 1) * batch size, output dim]\n        loss = criterion(output.view([-1,output.shape[2]]), trg.reshape(4*128))\n\n        loss.backward()\n\n        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n\n        optimizer.step()\n\n        total += trg.size(1)\n\n        _, predicted = torch.max(output.data, 2)\n\n        for vec in range(trg.size(1)):\n            correct += (predicted[:,vec] == (trg[:,vec])).all().item()\n\n\n        epoch_loss += loss.item()\n    print(correct, total, len(iterator))\n    return epoch_loss / len(iterator), correct / total\n\n\ndef evaluate(model, iterator, criterion):\n\n    model.eval()\n\n    epoch_loss = 0\n    total = 0\n    correct = 0\n    predicted_dis = 0\n    optimal_dis = 0\n    with torch.no_grad():\n        for i, (src, trg) in enumerate(iterator):\n            src = torch.transpose(src, 0, 1)\n            trg = torch.transpose(trg, 0, 1)\n\n            output = model(src, trg, 0) # turn off teacher forcing\n\n            loss = criterion(output.view([-1, output.shape[2]]), trg.reshape(4 * 128))\n\n            epoch_loss += loss.item()\n            total += trg.size(1)\n\n            _, predicted = torch.max(output.data, 2)\n\n            for vec in range(trg.size(1)):\n                correct += (predicted[:, vec] == (trg[:, vec])).all().item()\n            predicted_dis += computeDistance(torch.transpose(src, 0, 1), torch.transpose(predicted, 0, 1))\n            optimal_dis += computeDistance(torch.transpose(src, 0, 1), torch.transpose(trg, 0, 1))\n            if (i + 1) % 10 == 0:  # periodically report the running distances\n                print(predicted_dis, optimal_dis)\n    return epoch_loss / len(iterator), correct/total, \\\n           predicted_dis/total, optimal_dis/total\n\n\ndef loading_data(num_robots):\n    \"\"\"\n    (1): Load data from distanceMatrices.csv and assignmentMatrices.csv\n    (2): Split the data according to the number of robots\n    :return: groups of training data and test data\n    \"\"\"\n    import pandas\n\n    print(\"Obtain training data\")\n    #distanceMatrices = np.loadtxt('distanceMatrices.csv', dtype=float)\n    #assignmentMatrices = np.loadtxt('assignmentMatrices.csv', dtype=int)\n    distanceMatrices = pandas.read_csv('../data/4x4_SeqData/distanceMatrices.csv',\n                                       header=None,\n                                       nrows=200000,\n                                       sep=' ',\n                                       dtype='float')\n    distanceMatrices = distanceMatrices.values\n    assignmentMatrices = pandas.read_csv('../data/4x4_SeqData/assignmentMatrices.csv',\n                                         header=None,\n                                         nrows=200000,\n                                         sep=' ',\n                                         dtype='float')\n    assignmentMatrices = assignmentMatrices.values\n    print(\"Finish loading data\")\n\n    # y_train 
\ndef loading_data(num_robots):\n    \"\"\"\n    (1): Load data from distanceMatrices.csv and assignmentMatrices.csv\n    (2): Split the data according to the number of robots\n    :return: groups of training data and test data\n    \"\"\"\n    import pandas\n\n    print(\"Obtain training data\")\n    #distanceMatrices = np.loadtxt('distanceMatrices.csv', dtype=float)\n    #assignmentMatrices = np.loadtxt('assignmentMatrices.csv', dtype=int)\n    distanceMatrices = pandas.read_csv('../data/4x4_SeqData/distanceMatrices.csv',\n                                       header=None,\n                                       nrows=200000,\n                                       sep=' ',\n                                       dtype='float')\n    distanceMatrices = distanceMatrices.values\n    assignmentMatrices = pandas.read_csv('../data/4x4_SeqData/assignmentMatrices.csv',\n                                         header=None,\n                                         nrows=200000,\n                                         sep=' ',\n                                         dtype='float')\n    assignmentMatrices = assignmentMatrices.values\n    print(\"Finish loading data\")\n\n    # y_train = to_categorical(y_train)\n    N, M = assignmentMatrices.shape\n    assert num_robots == M\n    assignmentMatrices = assignmentMatrices.reshape(N, num_robots)\n\n    # Reshape into an N x M x M array: distanceMatrices[i, :, :] is the distance matrix for sample i\n    N, M = distanceMatrices.shape\n    distanceMatrices = distanceMatrices.reshape(N, num_robots, num_robots)\n\n    NTrain = int(0.9*N)\n    X_train = distanceMatrices[:NTrain, ]  # the training inputs we will always use\n    X_test = distanceMatrices[NTrain:, ]  # for testing\n    y_train = assignmentMatrices[:NTrain, :]\n    y_test = assignmentMatrices[NTrain:, :]\n    print(\"Obtain training data: robots: {}, samples: {}\".format(num_robots, N))\n\n    return torch.tensor(X_train, device=device).float(), torch.tensor(y_train, device=device).long(), \\\n           torch.tensor(X_test, device=device).float(), torch.tensor(y_test, device=device).long()\n\n\"\"\"\nInitialize model\n\"\"\"\nnum_robots = 4\nBATCH_SIZE = 128\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nX_train, y_train, X_test, y_test = loading_data(num_robots=num_robots)\n\ntrain_dataset = torch.utils.data.TensorDataset(X_train, y_train)\ntrain_iterator = Data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)\n\ntest_dataset = torch.utils.data.TensorDataset(X_test, y_test)\ntest_iterator = Data.DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)\n\n\nINPUT_DIM = num_robots\nOUTPUT_DIM = num_robots\nENC_EMB_DIM = num_robots\nDEC_EMB_DIM = num_robots\nHID_DIM = 512\nN_LAYERS = 2\nENC_DROPOUT = 0.5\nDEC_DROPOUT = 0.5\n\nenc = Encoder(INPUT_DIM, ENC_EMB_DIM, HID_DIM, N_LAYERS, ENC_DROPOUT)\ndec = Decoder(OUTPUT_DIM, DEC_EMB_DIM, HID_DIM, N_LAYERS, DEC_DROPOUT)\n\nmodel = Seq2Seq(enc, dec, device).to(device)\n\n\noptimizer = optim.Adam(model.parameters())\n\ncriterion = nn.CrossEntropyLoss()\n\ntraining = True\n\"\"\"\nTrain model\n\"\"\"\nif training:\n    N_EPOCHS = 100\n    CLIP = 10\n    SAVE_DIR = 'models/bidirectional'\n\n    if not os.path.isdir('{}'.format(SAVE_DIR)):\n        os.makedirs('{}'.format(SAVE_DIR))\n\n    for epoch in range(N_EPOCHS):\n\n        train_loss, acc = train(model, train_iterator, optimizer, criterion, CLIP)\n\n        if (epoch+1) % 1 == 0:\n            MODEL_SAVE_PATH = os.path.join(SAVE_DIR, 'tut1_model'+str(epoch+1)+'.pt')\n            torch.save(model.state_dict(), MODEL_SAVE_PATH)\n\n        print(\n            '| Epoch: {} | Train Loss: {} | Train PPL: {} | Train Accuracy: {}'.format(epoch+1, train_loss, math.exp(train_loss), acc))\nelse:\n    \"\"\"\n    Test model\n    \"\"\"\n    N_EPOCHS = 100\n    res_train = []\n    res = []\n    optimal_train = []\n    optimal = []\n    test_acc_list = []\n\n    for epoch in range(0, N_EPOCHS):\n        SAVE_DIR = 'models/bidirectional'\n        MODEL_SAVE_PATH = os.path.join(SAVE_DIR, 'tut1_model' + str(epoch + 1) + '.pt')\n        model.load_state_dict(torch.load(MODEL_SAVE_PATH))\n\n        #train_loss, train_acc, avg_tr_pred_dis, avg_tr_optimal_dis = evaluate(model, train_iterator, criterion)\n        test_loss, test_acc, avg_pred_dis, avg_optimal_dis = evaluate(model, test_iterator, criterion)\n\n        optimal_train.append(avg_optimal_dis)\n        res_train.append(avg_pred_dis)\n        #optimal.append(avg_tr_optimal_dis)\n        #res.append(avg_tr_pred_dis)\n\n        test_acc_list.append(test_acc)\n\n        print('EPOCH: {} | Test acc: {} '.format(epoch+1, test_acc))  #, train_acc))\n\n    #plotDistance(iterations=np.linspace(1, N_EPOCHS, N_EPOCHS), optimalDistance=np.asarray(optimal_train),\n    #             totalDistances=np.asarray(res_train))\n    import os\n    if not os.path.exists('results/row_bi'):\n        os.makedirs('results/row_bi')\n    with 
open('results/row_bi/optimal_dist_row_bidirectional.csv', 'wb') as f:\n        np.savetxt(f,\n                   np.asarray(optimal_train),\n                   newline='\\n',\n                   fmt='%d')\n    with open('results/row_bi/predicted_dist_row_bidirectional.csv', 'wb') as f:\n        np.savetxt(f,\n                   np.asarray(res_train),\n                   newline='\\n',\n                   fmt='%d')\n    # fixed typo in the output file name: accuracy\n    with open('results/row_bi/test_accuracy_row_bidirectional.csv', 'wb') as f:\n        np.savetxt(f,\n                   np.asarray(test_acc_list),\n                   newline='\\n',\n                   fmt='%f')\n\n    #from matplotlib import pyplot as plt\n    #plt.plot(np.linspace(1, N_EPOCHS, N_EPOCHS), test_acc_list)\n    #plt.xlabel(\"test accuracy\")\n    #plt.show()\n    #plotDistance(iterations=np.linspace(1,N_EPOCHS,N_EPOCHS), optimalDistance= np.asarray(optimal),\n    #             totalDistances= np.asarray(res))\n\n","sub_path":"bidirectionnal_seq2seq_4x4.py","file_name":"bidirectionnal_seq2seq_4x4.py","file_ext":"py","file_size_in_byte":12292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"296732966","text":"# script for generating voltage points for scl simulation with mplabx\n#\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# sample frequency\nfs = 4943\n# set frequency\n#f = 82.4 #e2\nf = 329.6 #e4\n# set fundamental frequency amplitude in millivolts\nA = 300\n# set some sample size\nsamples = 512\n# amplifier circuit voltage bias in mV\nvbias = 1800\n# time scale\nt = np.arange(0,samples/fs,1.0/fs) #entire chunk of signal\n#t = np.arange(0,1/f,1.0/fs) #one period of signal\n\n# fundamental frequency\nfundamental = A*np.sin(2 * np.pi * f * t)\n# add some harmonics\nsecond_harmonic = 3*A*np.sin(2 * np.pi *2* f * t)\nthird_harmonic = 2*A*np.sin(2 * np.pi *3* f * t)\n\n# signal y(t) is the sum of its harmonics\nyt = (fundamental + second_harmonic + third_harmonic) + vbias\n\nmaxi = np.argmax(yt)\nmini = np.argmin(yt)\nif yt[mini] < 0:\n    print(\"signal below 0 V\")\n\nmid = (yt[maxi] + yt[mini])/2\npk_pk = yt[maxi] - yt[mini]\n\n# plot the signal\nplt.plot(t,yt)\nplt.axhline(mid,color='orange')\nplt.axhline(0,color='black')\nplt.show()\n\nprint (\"target f\",f, \"| mV peak\", np.int16(yt[maxi]), \\\n       \"\\npeak to peak\", np.int16(pk_pk), \"| offset\", np.int16(mid))\n\n\n# open and overwrite file\nfile = open(\"../guitar_tuner.X/datapoints.txt\",\"w\")\n\nfor i in range(len(yt)):\n    file.write('{0} {1}\\n'.format(np.uint16(yt[i]),\" mV\"))\nfile.close()\n
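\n# --- Hedged aside, not in the original script: np.uint16() truncates toward zero\n# rather than rounding, so a computed 1799.9 mV is written out as 1799. A rounding\n# variant (helper name illustrative):\ndef quantize_mv(value):\n    # round to the nearest integer millivolt before the unsigned cast\n    return np.uint16(np.round(value))\n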
","sub_path":"py tools/f_to_mv.py","file_name":"f_to_mv.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418986793","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport csv\nimport numpy as np\nfrom sklearn import svm\nimport pandas as pd\nfrom sklearn.linear_model import SGDClassifier\n\nvertex_list = []\ntrain_data = []\nlabel = []\n# Function that removes duplicate values from a list\ndef remove_duplicates(values):\n    output = []\n    seen = set()\n    for value in values:\n        # If value has not been encountered yet,\n        # ... add it to both list and set.\n        if value not in seen:\n            output.append(value)\n            seen.add(value)\n    return output\n\n\n################### main ####################################\n\n# Read the CSV file (Period1)\nwith open('Period1.csv', newline='') as csvfile:\n    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n\n    for row in spamreader:\n        # Split each row into 3 parts\n        string =', '.join(row).split(\",\",2)\n        # Append the second and third parts to the list\n        vertex_list.extend([ int(string[1]) , int(string[2]) ])\n\n    # Deduplicate the values, keeping only unique entries, then sort\n    vertex_list = remove_duplicates(vertex_list)\n    vertex_list.sort()  # sort in place; a bare sorted() call discards its result\n    # Check the list length\n    print( \"Period1 node count: {}\".format(len(vertex_list)) )\n\n# Read the CSV file (Period2)\nwith open('Period2.csv', newline='') as csvfile:\n    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n\n    for row in spamreader:\n        # Split each row into 3 parts\n        string =', '.join(row).split(\",\",2)\n        # Append the second and third parts to the list\n        vertex_list.extend([ int(string[1]) , int(string[2]) ])\n\n    # Deduplicate the values, keeping only unique entries, then sort\n    vertex_list = remove_duplicates(vertex_list)\n    vertex_list.sort()\n    # Check the list length\n    print(\"Period1,2 total node count: {}\".format(len(vertex_list)))\n\n# Read the CSV file (TestData)\nTestData_node = []\nwith open('TestData.csv', newline='') as csvfile:\n    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n\n    for row in spamreader:\n        # Split each row into 3 parts\n        string =', '.join(row).split(\",\",2)\n        # Append the second and third parts to the list\n        TestData_node.extend([ int(string[1]) , int(string[2]) ])\n\n    # Deduplicate the values, keeping only unique entries, then sort\n    TestData_node = remove_duplicates(TestData_node)\n    TestData_node.sort()\n    # Check the list length\n    print(\"TestData total node count: {}\".format(len(TestData_node)))\n\n# Declare an NxN array filled with zeros\nn=len(vertex_list)\nMatrix = np.full( ( n, n ),0 )\n\n# Start filling values: mark each Period2 link with 1\nprint(\"start Period2\")\n# Read the CSV file ( Period2 )\nwith open('Period2.csv', newline='') as csvfile:\n    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n    for row in spamreader:\n        # Split each row into 3 parts\n        string =', '.join(row).split(\",\",2)\n        # Mark this link with 1\n        i= vertex_list.index( int(string[1]) )\n        j= vertex_list.index( int(string[2]) )\n        Matrix[i][j] = 1\n        train_data += [ [ int(string[1]) , int(string[2]) ] ]\n        label += [ 1 ]\n\nprint(\"Period2 done\")\n\n# Start filling values: mark each Period1 link with 1\nprint(\"start Period1\")\n# Read the CSV file ( Period1 )\nwith open('Period1.csv', newline='') as csvfile:\n    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n    for row in spamreader:\n        # Split each row into 3 parts\n        string =', '.join(row).split(\",\",2)\n        # Mark this link with 1 (training label 0 for Period1)\n        i= vertex_list.index( int(string[1]) )\n        j= vertex_list.index( int(string[2]) )\n        Matrix[i][j] = 1\n        train_data += [ [ int(string[1]) , int(string[2]) ] ]\n        label += [ 0 ]\n\nprint(\"Period1 done\")\n\n\n# Start training with SVM\nprint(\"start training1\")\n'''\nk=0.01\nfor i in range(n):\n    if vertex_list[i] in TestData_node :\n        for j in range(n):\n            if (vertex_list[j] in TestData_node and Matrix[i][j] != 1 and i!=j ):\n                train_data += [ [ vertex_list[i] , vertex_list[j] ] ]\n                label += [ Matrix[i][j] ]\n\n    if( i/(n/100) > k ):\n        print( \"Current progress: {}\".format( i/(n/100) ) )\n        k+=0.01\n'''\nprint(\"start training2\")\nclassifier = SGDClassifier(loss=\"hinge\", penalty=\"l2\")\nclassifier.fit( train_data , label )\nprint(\"finish training\")\n\n
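# --- Hedged aside, not in the original script: raw node-ID pairs give a linear\n# classifier almost nothing to generalise from. A common alternative feature for\n# link prediction is the common-neighbour count, which the adjacency matrix built\n# above already supports; the helper name and usage are illustrative only.\ndef common_neighbors(u, v):\n    i = vertex_list.index(u)\n    j = vertex_list.index(v)\n    # rows hold outgoing links; count targets shared by u and v\n    return int(np.sum(Matrix[i] * Matrix[j]))\n\n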
# Read the CSV file (TestData)\nanswer = []\nwith open('TestData.csv', newline='') as csvfile:\n    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n\n    for row in spamreader:\n        # Split each row into 3 parts\n        string =', '.join(row).split(\",\",2)\n        #i= vertex_list.index(string[1])\n        #j= vertex_list.index(string[2])\n        # Run the prediction\n        result = classifier.predict([ [ int(string[1]) , int(string[2]) ] ])\n\n        answer.extend([ (result[0]+1)%2 ])\n\n\n# Write the result file\ntarget_id=np.arange(10000)\ndataframe = pd.DataFrame({'target id':target_id , 'label':answer})\ndataframe.to_csv(\"result.csv\",index=False,sep=',')\n\n","sub_path":"linkPrediction.py","file_name":"linkPrediction.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"584582283","text":"# Capture image from the camera.\n# Convert image from RGB to HSV color model.\n# Obtain maximum value of value channel and calculate DV threshold.\n# Group pixels that fit DV threshold.\n# For each group do:\n#     a. Compute diameter and centroid.\n#     b. If the proportion of cropped area size and cropped area\n#        threshold > 0.3 get next group.\n#     c. Recognize laser spot color and add data to the local list.\n# Update the global laser spot list from the local list.\n# Go to the step 1.\n# A threshold around 140~170 seems about right\n\n\n\nimport cv2\n\nim=cv2.imread(\"../../images/bright_spot.jpg\")\ngray=cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n\n\n# initial threshold (unused below; the loop scans candidate thresholds)\nret, thresh_gray = cv2.threshold(gray,180,255,cv2.THRESH_BINARY)\n# im=cv2.cvtColor(im,cv2.COLOR_BGR2HSV)\n\ncv2.imshow(\"origin\",im)\ncv2.waitKey(0)\ncv2.imshow(\"gray\",gray)\ncv2.waitKey(0)\nmul=5\nfloor=140\nfor i in range(5):\n    ret, thresh_gray = cv2.threshold(gray,floor+mul*i,255,cv2.THRESH_BINARY)\n    cv2.imshow(str(ret)+\"thresh\",thresh_gray)\n    cv2.waitKey(0)","sub_path":"code/find_optimal_threshold.py","file_name":"find_optimal_threshold.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"34041275","text":"import os\nimport shutil\nfrom utils.logger import *\n\ndef check_and_makedirs(dir_path):\n    if bool(dir_path) and not os.path.exists(dir_path):\n        info(\"creating directory %s\" % dir_path)\n        os.makedirs(dir_path)\n\n# maybe_copy(flags.dataset_conf, \"%s/dataset.cfg\" % exp_dir)\ndef maybe_copy(src, dest):\n    if os.path.exists(dest):\n        info(\"%s exists, using that\" % dest)\n    else:\n        shutil.copyfile(src, dest)\n","sub_path":"src/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"533674393","text":"# Dynamic Difficulty\n################################################################################\n# In this activity, you’ll continue to update your Python blockchain\n# application. Specifically, you’ll allow dynamic updates to the mining\n# difficulty through a Streamlit component.\n\n\n# You will need to complete the following steps:\n# 1. Add a `difficulty` attribute to the `PyChain` data class.\n# 2. Add a `num_of_zeros` data attribute to the `PyChain` data class's\n#    `proof_of_work` method.\n# 3. Add a Streamlit component that allows a user to select the `difficulty`\n#    value of the `proof_of_work` method.\n# 4. 
Test the application.\n################################################################################\nimport streamlit as st\nfrom dataclasses import dataclass\nfrom typing import Any, List\nimport datetime as datetime\nimport pandas as pd\nimport hashlib\n\n################################################################################\n# Creates the Block data class\n\n\n@dataclass\nclass Block:\n    data: Any\n    creator_id: int\n    # note: this default is evaluated once, at class-definition time, so every\n    # Block created without an explicit timestamp shares the same value\n    timestamp: str = datetime.datetime.utcnow().strftime(\"%H:%M:%S\")\n    prev_hash: str = 0\n    nonce: str = 0\n\n    def hash_block(self):\n        sha = hashlib.sha256()\n\n        data = str(self.data).encode()\n        sha.update(data)\n\n        creator_id = str(self.creator_id).encode()\n        sha.update(creator_id)  # fixed: previously re-hashed `data` instead of the creator id\n\n        prev_hash = str(self.prev_hash).encode()\n        sha.update(prev_hash)\n\n        timestamp = str(self.timestamp).encode()\n        sha.update(timestamp)\n\n        nonce = str(self.nonce).encode()\n        sha.update(nonce)\n\n        return sha.hexdigest()\n\n################################################################################\n# Step 1:\n# Add a `difficulty` data attribute to the `PyChain` data class.\n# Use a data type of `int` and a default value of 4.\n\n\n@dataclass\nclass PyChain:\n    chain: List[Block]\n\n    # @TODO:\n    # Add a `difficulty` data attribute with a data type of `int` and a default\n    # value of 4.\n    difficulty: int = 4\n\n    # Step 2:\n    # Add a `num_of_zeros` data attribute that multiplies the string value (\"0\") by the `difficulty` value.\n    def proof_of_work(self, block):\n        calculated_hash = block.hash_block()\n\n        # @TODO:\n        # Add a `num_of_zeros` data attribute that multiplies the string value (\"0\") by the `difficulty` value.\n        num_of_zeros = \"0\" * self.difficulty\n\n        while not calculated_hash.startswith(num_of_zeros):\n            block.nonce += 1\n            calculated_hash = block.hash_block()\n\n        print(\"Winning Hash\", calculated_hash)\n        return block\n\n    def add_block(self, candidate_block):\n        block = self.proof_of_work(candidate_block)\n        self.chain += [block]\n\n################################################################################\n# Streamlit Code\n\n# Adds the cache decorator for Streamlit\n\n\n@st.cache(allow_output_mutation=True)\ndef setup():\n    print(\"Initializing Chain\")\n    return PyChain([Block(data=\"Genesis\", creator_id=0)])\n\n\npychain = setup()\n\nst.markdown(\"# PyChain\")\nst.markdown(\"## Store Data in the Chain\")\n\ninput_data = st.text_input(\"Block Data\")\n\n################################################################################\n# Step 3:\n# Add a Streamlit component that can update the `difficulty` attribute of the `PyChain` class. To do so, complete the following steps:\n# 1. Add a Streamlit slider component that allows the user to select a\n#    difficulty value from 1 to 5. Set the starting value to 4. Set this\n#    component equal to a variable named `difficulty`.\n# 2. Update the `difficulty` data attribute of the `PyChain` data class (`pychain.difficulty`) with this new `difficulty` value.\n\n
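# --- Hedged aside, not part of the assignment text: with a uniformly distributed hex\n# digest, each extra required leading zero multiplies the expected number of nonce\n# attempts by 16, so mining time grows exponentially with `difficulty`. Illustrative\n# helper (name assumed):\ndef expected_attempts(difficulty):\n    # average hash attempts before a digest starts with `difficulty` zeros\n    return 16 ** difficulty\n\n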
# @TODO:\n# Add a Streamlit slider named \"Block Difficulty\" that allows the user to update a difficulty value. Set this equal to the variable `difficulty`\ndifficulty = st.sidebar.slider(\"Block Difficulty\", 1, 5, 4)\n\n# @TODO\n# Update the `difficulty` data attribute of the `PyChain` data class (`pychain.difficulty`) with this new `difficulty` value\npychain.difficulty = difficulty\n\n\nif st.button(\"Add Block\"):\n    prev_block = pychain.chain[-1]\n    prev_block_hash = prev_block.hash_block()\n\n    new_block = Block(data=input_data, creator_id=42, prev_hash=prev_block_hash)\n\n    pychain.add_block(new_block)\n\n    st.write(\"Winning Hash\", new_block.hash_block())\n\nst.markdown(\"## PyChain Ledger\")\npychain_df = pd.DataFrame(pychain.chain)\n\nst.write(pychain_df)\n\n\n################################################################################\n# Step 4:\n# Test the application.\n\n# Complete the following steps:\n# 1. In the terminal, navigate to the `Unsolved` folder for this activity.\n# 2. Run the Streamlit app in the terminal by using `streamlit run app.py`.\n# 3. Type some input text in the text box, and then click the Add Block button.\n#    This adds a block to the chain.\n# 4. Change the difficulty, and then add another block. Observe how this affects the overall mining time (the time that it takes to add a block to the\n#    chain with the proof of work enabled).\n\n################################################################################\n","sub_path":"18_Blockchain_with_Python/05-Dynamic_Difficulty/Solved/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"245756822","text":"#!/usr/bin/python3.3\n\"\"\"\nTODO\n\"\"\"\n__author__ = 'heliosantos99@gmail.com (Helio Santos)'\n\nfrom filesystemCrawlerLib import FilesystemCrawler\nfrom parsers import loadmatchrulesfromfile\n\nfrom operators import Printer\nfrom operators import Archiver\nfrom operators import FileDeleter\nfrom ePPOperators.operators import EventLogGuidsParser\nfrom os import path\nimport msvcrt\nimport argparse\n\n\ndef main():\n    args = parse_arguments()\n    basedirPath, matchesPath = parse_config_paths(args)\n\n    matchrules, errors = loadmatchrulesfromfile(basedirPath, matchesPath)\n\n    print(\"\\n[Match Rules]\")\n    for rule in matchrules:\n        print('{type} {polarity} {pattern}'.format(\n            type='d' if rule.dirsOnly else 'f' if rule.filesOnly else ' ',\n            polarity='+' if rule.polarity else '-',\n            pattern=rule.pattern.pattern), end='')\n\n        print('' if rule.contentPattern is None else\n              \" containing '%s'\" % rule.contentPattern)\n\n    if errors:\n        print(\"\\n[Match Errors]\")\n        for line, error in errors:\n            print('Line {line}: {error}'.format(\n                line=str(line), error=str(error)))\n\n    operators = []\n    operators.append((Printer(), 'P'))\n    operators.append((Archiver(), 'A'))\n    operators.append((FileDeleter(), 'D'))\n    operators.append((EventLogGuidsParser(), 'R'))\n\n    print(\"\\n[Operators]\")\n    for operator, key in operators:\n        print('press {key} to select the {name} - {description}'.format(\n            key=key, name=operator.__class__.__name__,\n            description=operator.description))\n    print(\"\\npress [Enter] to execute\\n\")\n\n    selectedOperators = []\n    op = 0\n    while op != 13:  # while Enter is not pressed\n        op = ord(msvcrt.getch())\n        for operator, key in operators:\n            if ord(key) == op or ord(key.lower()) == op:\n                print(operator.__class__.__name__)\n                selectedOperators.append(operator)\n\n    crawler = FilesystemCrawler(selectedOperators, matchrules)\n    matchedCounter, faileddirectories = crawler.search(basedirPath)\n\n    
print('\\n{counter} matches'.format(counter=matchedCounter))\n if faileddirectories:\n print('\\n[Failed Directories]')\n for failed in faileddirectories:\n print(failed)\n\n print(\"\\nAll done\")\n msvcrt.getch()\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--conf_dir',\n help='the directory with the configuration files')\n args = parser.parse_args()\n return args\n\n\ndef parse_config_paths(args):\n confDir = (path.dirname(path.realpath(__file__)) if\n args.conf_dir is None else args.conf_dir)\n\n basedirLocationPath = path.join(confDir, 'FilesystemCrawler.basedir')\n matchesPath = path.join(confDir, 'FilesystemCrawler.matches')\n\n basedir = confDir\n if path.isfile(basedirLocationPath):\n with open(basedirLocationPath) as f:\n basedir = f.read().strip()\n\n return basedir, matchesPath\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"490586053","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom flask import Flask\nfrom flask import request\napp = Flask(__name__)\n\n\n#from gmail import send_mail\nfrom webex_teams import post_message\nfrom serviceNowInc import open_incident\nfrom slack import post_SlackMessage\nfrom msTeams import post_msTeamsMessage\n\ndef old_format_event(event):\n\n header = 'Event:{}, Severity:{}, Category:{}\\n'.format(event['title'],\n event['severity'],\n event['category'])\n message = '{}\\n'.format(event['description'])\n\n message += \"\\nSuggested Actions:\\n\"\n if event['title'] == \"Device Image Outdated\":\n device = event['enrichmentInfo']['connectedDevice'][0]['deviceDetails']\n image = event['enrichmentInfo']['imageDetails']['goldenImage']['imageVersion']\n message += \"Update device {}[{}] running {} to {}\\n\".format(device['hostname'],\n device['managementIpAddress'],\n device['softwareVersion'],\n image)\n else:\n for action in event['enrichmentInfo']['issueDetails']['issue'][0]['suggestedActions']:\n message += \" -{}\\n\".format(action['message'])\n\n caller = \"Event Management\"\n categoryList = \"Network\"\n subcategoryList = \"Internal Application\"\n businessServiceLookup = \"IT Services\"\n cmdb_ciLookup = \"\"\n contact_typeList = \"Alert\"\n stateList = \"New\"\n impact = \"3\"\n urgency = \"1\"\n assignmentGroupLookup = \"Network\"\n assigned_toLookup = \"\"\n\n return caller, categoryList, subcategoryList, businessServiceLookup, cmdb_ciLookup, contact_typeList, stateList, impact, urgency, assignmentGroupLookup, assigned_toLookup, header, message\n\ndef new_format_event(dnac,event):\n header = 'Event:{}, Category:{}\\n'.format(event['eventId'], event['category'])\n message = '\\n'.join([ '{}:{}'.format(k,v) for k,v in event['details'].items()])\n if 'ciscoDnaEventLink' in event:\n message += \"\\nEventURL: https://{}/{}\".format(dnac,event['ciscoDnaEventLink'])\n caller = \"Event Management\"\n categoryList = \"Network\"\n subcategoryList = \"Internal Application\"\n businessServiceLookup = \"IT Services\"\n cmdb_ciLookup = \"\"\n contact_typeList = \"Alert\"\n stateList = \"New\"\n impact = \"3\"\n urgency = \"1\"\n assignmentGroupLookup = \"Network\"\n assigned_toLookup = \"\"\n\n return caller, categoryList, subcategoryList, businessServiceLookup, cmdb_ciLookup, contact_typeList, stateList, impact, urgency, assignmentGroupLookup, assigned_toLookup, header, message\n\ndef 
format_event(dnac, event):\n    if 'title' in event:\n        return(old_format_event(event))\n    else:\n        return (new_format_event(dnac,event))\n\ndef handle(dnac, event):\n    '''\n    handles an event. Can send an email, or message to webex.\n    :param event:\n    :return:\n    '''\n    caller, categoryList, subcategoryList, businessServiceLookup, cmdb_ciLookup, contact_typeList, stateList, impact, urgency, assignmentGroupLookup, assigned_toLookup, header, message = format_event(dnac, event)\n    print(message)\n\n    # send to webex\n    post_message(\"*******\\n\" + header + message)\n\n    # send an email\n    #send_mail(header,message)\n\n\n    #send to ServiceNow Incident\n    open_incident(caller, categoryList, subcategoryList, businessServiceLookup, cmdb_ciLookup, contact_typeList, stateList, impact, urgency, assignmentGroupLookup, assigned_toLookup, header, message)\n\n    #Send to Slack URL\n    post_SlackMessage(\"*******\\n\" + header + message)\n\n    #Send to MS Teams as a Notification\n    post_msTeamsMessage(header, message)\n\n# fixed: the second route previously repeated '/', so non-root URIs never reached\n# get_all; it needs the <path:path> converter to capture arbitrary paths\n@app.route('/', defaults={'path': ''}, methods=['GET','POST'])\n@app.route('/<path:path>', methods=[\"GET\",\"PUT\",\"POST\",\"DELETE\"])\ndef get_all(path):\n    print(\"Method {}, URI {}\".format(request.method,path))\n    if request.method == \"POST\":\n        print (request.headers)\n        print (request.json)\n        if request.json != {}:\n            handle(request.remote_addr, request.json)\n        else:\n            print(\"skipping - empty\")\n    return (\"OK\")\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=\"9000\", ssl_context='adhoc')\n","sub_path":"WebHookServer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"494095091","text":"from datetime import datetime\nfrom urllib.request import urlopen\nfrom subprocess import check_output\n\n######################\n# DOWNLOAD FUNCTIONS #\n######################\n\n#url -> html\ndef get_html(url):\n    html = ''\n    try:\n        html = check_output(['wget', '-qO-', url]).decode()\n    except Exception as e:\n        print(e)\n    return html\n\n##################\n# TIME FUNCTIONS #\n##################\n\n#None -> str(datetime)\ndef get_utc_datetime():\n    try:\n        data = get_html('http://www.timeapi.org/utc/now')\n        data = data[:data.find('+')]\n        data = data.replace('T',' ')\n    except Exception as e:\n        data=''\n    return data\n\n#None -> str(datetime)\ndef get_machine_datetime():\n    now = datetime.now()\n    ret = now.strftime('%Y-%m-%d %H:%M:%S')\n    return ret\n\n######################\n# CLEANING FUNCTIONS #\n######################\n\n#str(html) -> double(value)\ndef clean_html(raw_html):\n    re_sta = ''\n    re_end = ''\n    starting = raw_html.find(re_sta)\n    ending = raw_html.find(re_end, starting)\n    html = raw_html[starting+len(re_sta):ending]\n    value = html[html.find('>')+1:]\n    value = value.replace(',','')\n    return float(value)\n\n\n#raw_html = get_html(\"http://finance.yahoo.com/q/bc?s=%5EIPSA\")\n#raw_html = get_html(\"http://finance.yahoo.com/q/bc?s=BVMF3.SA\")\n\n\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"263556977","text":"'''\r\nNumpy library: Provides a way to handle matrices \r\nlinalg module: used to calculate eigenvalues and eigenvectors\r\nopencv2.0: graphics library\r\n'''\r\n\r\nfrom numpy import *\r\nfrom numpy import linalg as la\r\nimport cv2 as cv\r\n\r\n'''\r\n    function: img2vector\r\n    usage: temp_array = img2vector(temp_img)\r\n    This 
function calls the reshape function\r\n    in numpy to reduce the two-dimensional matrix\r\n    to a one-dimensional vector\r\n'''\r\n\r\n\r\ndef img2vector(img):\r\n\r\n    rows, cols = img.shape\r\n    imgVector = zeros((1,rows*cols))\r\n    imgVector = reshape(img,(1,rows*cols))\r\n    return imgVector\r\n\r\n'''\r\n    function: average\r\n    usage: array_average = average(array1)\r\n    Calculate the integer mean value\r\n    of the row vectors of the matrix\r\n'''\r\n\r\ndef average(array):\r\n    return mean(array,0,int)\r\n\r\n'''\r\n    function: refine\r\n    usage: array_refine = refine(array_average, array1)\r\n    Calculate the mean-subtracted faces\r\n'''\r\n\r\ndef refine(array_average, array1):\r\n    refine1 = []\r\n    for member in array1:\r\n        temp_refine = subtract(member, array_average)\r\n        refine1.append(temp_refine)\r\n    return refine1\r\n\r\n####################### global scope #######################\r\n\r\n# read image\r\n\r\nstatic_shape = cv.imread(\"face1.jpg\",0).shape\r\n\r\n# The 36 images to be processed are dimensionally reduced\r\n\r\narray1 = []\r\nfor i in range(0, 36):\r\n    temp_img = cv.imread(\"face\" + str(i + 1) + \".jpg\",0)\r\n    # Since the image sizes are not the same,\r\n    # resize all the images against the size of the first image.\r\n    # cv.resize expects dsize as (width, height), i.e. (cols, rows)\r\n    temp_img = cv.resize(temp_img,(static_shape[1], static_shape[0]))\r\n    temp_array = img2vector(temp_img)\r\n    array1.append(temp_array[0])\r\n\r\n# Find the mean-subtracted matrix\r\n\r\narray_average = average(array1)\r\narray_refine = refine(array_average, array1)\r\n\r\n# Matrix transpose\r\narray_refine = array(array_refine)\r\narray_tran = array_refine.T\r\n\r\n# Dot matrix multiplication\r\n\r\narray_multi = dot(array_refine, array_tran)\r\n\r\n# calculate eigenvalues and eigenvectors\r\n\r\nu, v = linalg.eig(array_multi)\r\n\r\n# Using eigenvectors and the original matrix to calculate the eigenfaces\r\n\r\nresult1 = []\r\nfor i in range(0, 6):\r\n    # eigenvectors are the COLUMNS of v, so index v[:, i] rather than v[i]\r\n    temp_m = dot(array_tran, v[:, i])\r\n    result1.append(temp_m)\r\n\r\n# Change the data type to int\r\n\r\nfor i in range(0, 6):\r\n    for j in range(0, static_shape[1] * static_shape[0]):\r\n        result1[i][j] = int(result1[i][j])\r\n\r\n# Reshape the two-dimensional matrix and save it as a picture\r\n\r\nfor i in range(0, 6):\r\n    temp_r = reshape(result1[i],static_shape)\r\n    cv.imwrite(\"result\"+str(i)+\".jpg\", temp_r)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"5/给定人脸/faceprocess.py","file_name":"faceprocess.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"214590458","text":"from dotenv import load_dotenv\nimport os\nimport json\nfrom datetime import datetime, timedelta, date\nfrom dateutil.parser import parse as is_date\n\n# Import namespaces\nimport azure.cognitiveservices.speech as speech_sdk\nfrom playsound import playsound\n\n\n\n\ndef main():\n\n    try:\n        # Get Configuration Settings\n        load_dotenv(dotenv_path='./.env')\n        lu_app_id = os.getenv('LU_APP_ID')\n        lu_prediction_region = os.getenv('LU_PREDICTION_REGION')\n        lu_prediction_key = os.getenv('LU_PREDICTION_KEY')\n\n        # Configure speech service and get intent recognizer\n        speech_config = speech_sdk.SpeechConfig(subscription=lu_prediction_key, region=lu_prediction_region)\n        audio_config = speech_sdk.AudioConfig(use_default_microphone=True)\n        recognizer = speech_sdk.intent.IntentRecognizer(speech_config, audio_config)\n\n\n\n        # Get the model from the AppID and add the intents we want to use\n        model = speech_sdk.intent.LanguageUnderstandingModel(app_id=lu_app_id)\n        intents = [\n            (model, \"GetTime\"),\n            (model, 
\"GetDate\"),\n (model, \"GetDay\"),\n (model, \"None\")\n ]\n recognizer.add_intents(intents)\n\n\n\n # Process speech input\n intent = ''\n print('Please say your request...')\n result = recognizer.recognize_once_async().get()\n\n \n if result.reason == speech_sdk.ResultReason.RecognizedIntent:\n print('result=',result) \n\n intent = result.intent_id\n print(\"Query: {}\".format(result.text))\n print(\"Intent: {}\".format(intent))\n json_response = json.loads(result.intent_json)\n print(\"JSON Response:\\n{}\\n\".format(json.dumps(json_response, indent=2)))\n \n # Get the first entity (if any)\n entity_type = ''\n entity_value = ''\n if len(json_response[\"entities\"]) > 0:\n entity_type = json_response[\"entities\"][0][\"type\"]\n entity_value = json_response[\"entities\"][0][\"entity\"]\n print(entity_type + ': ' + entity_value)\n\n \n # Apply the appropriate action\n if intent == 'GetTime':\n location = 'local'\n # Check for entities\n if entity_type == 'Location':\n location = entity_value\n # Get the time for the specified location\n print(GetTime(location))\n\n elif intent == 'GetDay':\n date_string = date.today().strftime(\"%m/%d/%Y\")\n # Check for entities\n if entity_type == 'Date':\n date_string = entity_value\n # Get the day for the specified date\n print(GetDay(date_string))\n\n elif intent == 'GetDate':\n day = 'today'\n # Check for entities\n if entity_type == 'Weekday':\n # List entities are lists\n day = entity_value\n # Get the date for the specified day\n print(GetDate(day))\n\n else:\n # Some other intent (for example, \"None\") was predicted\n print('You said {}'.format(result.text))\n if result.text.lower().replace('.', '') == 'stop':\n intent = result.text\n else:\n print('Try asking me for the time, the day, or the date.')\n\n \n elif result.reason == speech_sdk.ResultReason.RecognizedSpeech:\n # Speech was recognized, but no intent was identified.\n intent = result.text\n print(\"I don't know what {} means.\".format(intent))\n elif result.reason == speech_sdk.ResultReason.NoMatch:\n # Speech wasn't recognized\n print(\"Sorry. 
I didn't understand that.\")\n        elif result.reason == speech_sdk.ResultReason.Canceled:\n            # Something went wrong\n            print(\"Intent recognition canceled: {}\".format(result.cancellation_details.reason))\n            if result.cancellation_details.reason == speech_sdk.CancellationReason.Error:\n                print(\"Error details: {}\".format(result.cancellation_details.error_details))\n\n\n\n    except Exception as ex:\n        print(ex)\n\n\ndef GetTime(location):\n    time_string = ''\n\n    # Note: To keep things simple, we'll ignore daylight saving time and support only a few cities.\n    # In a real app, you'd likely use a web service API (or write more complex code!)\n    # Hopefully this simplified example is enough to get the idea that you\n    # use LU to determine the intent and entities, then implement the appropriate logic\n\n    if location.lower() == 'local':\n        now = datetime.now()\n        time_string = '{}:{:02d}'.format(now.hour,now.minute)\n    elif location.lower() == 'london':\n        utc = datetime.utcnow()\n        time_string = '{}:{:02d}'.format(utc.hour,utc.minute)\n    elif location.lower() == 'sydney':\n        time = datetime.utcnow() + timedelta(hours=11)\n        time_string = '{}:{:02d}'.format(time.hour,time.minute)\n    elif location.lower() == 'new york':\n        time = datetime.utcnow() + timedelta(hours=-5)\n        time_string = '{}:{:02d}'.format(time.hour,time.minute)\n    elif location.lower() == 'nairobi':\n        time = datetime.utcnow() + timedelta(hours=3)\n        time_string = '{}:{:02d}'.format(time.hour,time.minute)\n    elif location.lower() == 'tokyo':\n        time = datetime.utcnow() + timedelta(hours=9)\n        time_string = '{}:{:02d}'.format(time.hour,time.minute)\n    elif location.lower() == 'delhi':\n        time = datetime.utcnow() + timedelta(hours=5.5)\n        time_string = '{}:{:02d}'.format(time.hour,time.minute)\n    else:\n        time_string = \"I don't know what time it is in {}\".format(location)\n\n    return time_string\n\ndef GetDate(day):\n    date_string = 'I can only determine dates for today or named days of the week.'\n\n    weekdays = {\n        \"monday\":0,\n        \"tuesday\":1,\n        \"wednesday\":2,\n        \"thursday\":3,  # fixed key: 'thursday' lookups previously missed the misspelled entry\n        \"friday\":4,\n        \"saturday\":5,\n        \"sunday\":6\n    }\n\n    today = date.today()\n\n    # To keep things simple, assume the named day is in the current week (Sunday to Saturday)\n    day = day.lower()\n    if day == 'today':\n        date_string = today.strftime(\"%m/%d/%Y\")\n    elif day in weekdays:\n        todayNum = today.weekday()\n        weekDayNum = weekdays[day]\n        offset = weekDayNum - todayNum\n        date_string = (today + timedelta(days=offset)).strftime(\"%m/%d/%Y\")\n\n    return date_string\n\ndef GetDay(date_string):\n    # Note: To keep things simple, dates must be entered in US format (MM/DD/YYYY)\n    try:\n        date_object = datetime.strptime(date_string, \"%m/%d/%Y\")\n        day_string = date_object.strftime(\"%A\")\n    except:\n        day_string = 'Enter a date in MM/DD/YYYY format.'\n    return day_string\n\nif __name__ == \"__main__\":\n    main()","sub_path":"11-luis-speech/Python/speaking-clock-client/speaking-clock-client.py","file_name":"speaking-clock-client.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"232576611","text":"from django import forms\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.core.validators import FileExtensionValidator\nfrom .models import Nexus\nfrom musimatica.models import Score\nfrom notio.models import Notio\n\nclass NexusForm(forms.ModelForm):\n    title = forms.CharField(max_length=70, required=True, initial='Untitled', label_suffix='')\n    description = 
forms.CharField(max_length=140, label='Description', label_suffix='', required=False, widget=forms.Textarea)\n is_published = forms.BooleanField(required=False, widget=forms.HiddenInput(), label='', label_suffix='', initial=True)\n score = forms.ModelChoiceField(\n queryset=Score.objects.all(),\n required=False,\n widget=forms.HiddenInput(),\n label='',\n label_suffix='',\n )\n notio = forms.ModelChoiceField(\n queryset=Notio.objects.all(),\n required=False,\n widget=forms.HiddenInput(),\n label='',\n label_suffix='',\n )\n \n class Meta:\n model = Nexus\n fields = [\n 'title',\n 'description',\n 'is_published',\n 'score',\n 'notio',\n ]\n \nclass NexusMediaForm(forms.ModelForm):\n image = forms.ImageField(required=False)\n audio = forms.FileField(\n required=False,\n validators=[FileExtensionValidator(allowed_extensions=['wav', 'mp3', 'm4a'])],\n )\n video = forms.FileField(\n required=False,\n validators=[FileExtensionValidator(allowed_extensions=['avi', 'wmv', 'mov', 'mp4'])],\n )\n \n class Meta:\n model = Nexus\n fields = [\n 'image',\n 'audio',\n 'video',\n ]\n \n","sub_path":"nexus/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"491415630","text":"from panda3d.core import Vec3\nfrom panda3d.core import CollisionSphere\n\nimport wecs\n\nfrom wecs.aspects import Aspect\nfrom wecs.aspects import factory\n\nfrom wecs.panda3d.constants import FALLING_MASK\nfrom wecs.panda3d.constants import BUMPING_MASK\nfrom wecs.panda3d.constants import CAMERA_MASK\n\nimport behaviors\n\nfrom avatar_ui import Embodiable\n\n\n# Map\n\ngame_map = Aspect(\n [\n wecs.panda3d.prototype.Model,\n wecs.panda3d.prototype.Geometry,\n wecs.panda3d.prototype.CollidableGeometry,\n # wecs.panda3d.prototype.FlattenStrong,\n # wecs.panda3d.mouseover.MouseOverableGeometry,\n # wecs.panda3d.mouseover.Pointable,\n wecs.panda3d.spawnpoints.SpawnMap,\n ],\n overrides={\n wecs.panda3d.prototype.CollidableGeometry: dict(\n mask=FALLING_MASK|BUMPING_MASK|CAMERA_MASK,\n ),\n },\n)\n\n\n# Props\n\nprop = Aspect(\n [\n wecs.panda3d.prototype.Model,\n wecs.panda3d.prototype.Geometry,\n wecs.panda3d.character.BumpingMovement,\n wecs.panda3d.spawnpoints.SpawnAt,\n ],\n)\n\n\n# There are characters, which are points in space that can be moved\n# around using the `CharacterController`, using either player input or\n# AI control.\n\ncharacter = Aspect(\n [\n wecs.mechanics.clock.Clock,\n wecs.panda3d.prototype.Model,\n wecs.panda3d.character.CharacterController,\n wecs.panda3d.spawnpoints.SpawnAt,\n ],\n overrides={\n wecs.mechanics.clock.Clock: dict(\n clock=lambda: factory(wecs.mechanics.clock.panda3d_clock),\n ),\n wecs.panda3d.character.CharacterController: dict(\n gravity=Vec3(0, 0, -30),\n ),\n },\n)\n\n\n# Avatars are characters which have (presumably humanoid) animated\n# models that can walk around. 
Their entities can be found using the\n# mouse cursor or other collision sensors.\n\nanimated = Aspect(\n [\n wecs.panda3d.prototype.Actor,\n wecs.panda3d.animation.Animation,\n ],\n)\n\n\nwalking = Aspect(\n [\n wecs.panda3d.character.WalkingMovement,\n wecs.panda3d.character.InertialMovement,\n wecs.panda3d.character.BumpingMovement,\n wecs.panda3d.character.FallingMovement,\n wecs.panda3d.character.JumpingMovement,\n ],\n overrides={\n wecs.panda3d.character.WalkingMovement:dict(\n speed=500.0,\n ),\n wecs.panda3d.character.JumpingMovement:dict(\n impulse=Vec3(0, 0, 10),\n ),\n }\n)\n\n\navatar = Aspect(\n [\n character,\n animated,\n walking,\n #wecs.panda3d.mouseover.MouseOverable,\n #wecs.panda3d.mouseover.Targetable,\n Embodiable,\n ],\n overrides={\n wecs.panda3d.character.WalkingMovement: dict(\n turning_speed=40.0,\n #turning_speed=540.0,\n ),\n },\n)\n\n\n# Disembodied entities are simply characters that can float.\n# FIXME: They should probably also fall/bump into things.\n\ndisembodied = Aspect(\n [\n character,\n wecs.panda3d.character.FloatingMovement,\n ],\n)\n\n\nfirst_person = Aspect(\n [\n wecs.panda3d.camera.Camera,\n wecs.panda3d.camera.MountedCameraMode,\n ],\n)\n\n\nthird_person_base = Aspect(\n [\n wecs.panda3d.camera.Camera,\n wecs.panda3d.camera.ObjectCentricCameraMode,\n wecs.panda3d.camera.CollisionZoom,\n wecs.panda3d.character.AutomaticTurningMovement,\n ],\n overrides={\n wecs.panda3d.camera.ObjectCentricCameraMode: dict(\n turning_speed=180.0,\n ),\n },\n)\n\n\nthird_person_action = Aspect(\n [\n third_person_base,\n wecs.panda3d.character.TurningBackToCameraMovement,\n ],\n overrides={\n wecs.panda3d.character.TurningBackToCameraMovement: dict(\n view_axis_alignment=0.4,\n threshold=0.2,\n ),\n },\n)\n\n\nthird_person_twin_stick = Aspect(\n [\n third_person_base,\n wecs.panda3d.character.CameraReorientedInput,\n wecs.panda3d.character.TwinStickMovement,\n ],\n overrides={\n wecs.panda3d.camera.ObjectCentricCameraMode: dict(\n pitch=-30.0,\n ),\n },\n)\n\n\n# The action camera uses the 'camera_movement' context to rotate the\n# camera. Twin stick uses the 'character_direction' context to indicate\n# where to face.\nthird_person = third_person_action\n#third_person = third_person_twin_stick\n\n\n# Player interface / AI.\n# Note that these aren't mutually exclusive. Both can exert control over\n# the `CharacterController`. 
If `Input.contexts` includes\n# 'character_movement', AI input is overwritten by player input; If it\n# doesn't, it isn't.\n# The player interface also can control the NPC AI, using the entity to\n# send commands to it if no other entity is selected as recipient.\n\npc_mind = Aspect(\n [\n wecs.panda3d.input.Input,\n #wecs.panda3d.mouseover.MouseOveringCamera,\n #wecs.panda3d.mouseover.UserInterface,\n ],\n overrides={\n wecs.panda3d.input.Input: dict(\n contexts={\n 'character_movement',\n #'character_direction',\n 'camera_movement',\n 'camera_zoom',\n 'mouse_over',\n 'select_entity',\n },\n ),\n },\n)\n\n\nnpc_behaviors = lambda: dict(\n #idle=wecs.panda3d.ai.idle,\n idle=behaviors.idle(),\n walk_to_entity=behaviors.walk_to_entity(),\n)\n\n\nnpc_mind = Aspect(\n [\n wecs.panda3d.ai.BehaviorAI,\n #wecs.panda3d.mouseover.Selectable,\n ],\n overrides={\n wecs.panda3d.ai.BehaviorAI: dict(\n behavior=['idle'],\n behaviors=lambda: npc_behaviors(),\n ),\n },\n)\n\n\n# Game Objects, finally!\n# An observer is a disembodied, player-controlled character.\n# A player_character is a player-controlled avatar\n# A non_player_character is an AI-controlled avatar.\n\nobserver = Aspect(\n [\n disembodied,\n first_person,\n pc_mind,\n ],\n)\n\n\nplayer_character = Aspect(\n [\n avatar,\n third_person,\n pc_mind,\n npc_mind,\n ],\n)\n\n\nnon_player_character = Aspect(\n [\n avatar,\n npc_mind,\n ],\n)\n\n\n# WECS' default 3D character is Rebecca, and these are her parameters.\n\ndef rebecca_bumper():\n return {\n 'bumper': dict(\n node_name='bumper',\n #shape=CollisionSphere,\n #center=Vec3(0.0, 0.0, 1.0),\n #radius=0.7,\n debug=True,\n ),\n }\n\n\ndef rebecca_lifter():\n return {\n 'lifter': dict(\n node_name='lifter',\n #shape=CollisionSphere,\n #center=Vec3(0.0, 0.0, 0.5),\n #radius=0.5,\n debug=True,\n ),\n }\n\n\nrebecca = {\n wecs.panda3d.prototype.Geometry: dict(\n file='models/character/rebecca.bam',\n ),\n wecs.panda3d.prototype.Actor: dict(\n file='models/character/rebecca.bam',\n ),\n wecs.panda3d.character.BumpingMovement: dict(\n node_name='bumper',\n tag_name='bumper',\n solids=factory(rebecca_bumper),\n #debug=True,\n ),\n wecs.panda3d.character.FallingMovement: dict(\n node_name='lifter',\n tag_name='lifter',\n solids=factory(rebecca_lifter),\n #debug=True,\n ),\n wecs.panda3d.mouseover.MouseOverable: dict(\n solid=CollisionSphere(0, 0, 1, 1),\n ),\n}\n\n\n# A park bench\n\nbench = {\n wecs.panda3d.prototype.Geometry: dict(\n file='models/props/parkbench.bam',\n ),\n wecs.panda3d.character.BumpingMovement: dict(\n node_name='bumper',\n tag_name='bumper',\n #debug=True,\n ),\n}\n","sub_path":"examples/panda3d-cutting-edge/aspects.py","file_name":"aspects.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"393291669","text":"\"Go through all publications and compile journals with names and ISSN.\"\n\nfrom __future__ import print_function\n\nfrom publications import constants\nfrom publications import settings\nfrom publications import utils\nfrom publications.journal import JournalSaver\n\ndef get_args():\n parser = utils.get_command_line_parser(\n 'Compile all journals in publications.')\n return parser.parse_args()\n\ndef compile_journals(db):\n \"Compile journals as dictionary with key ISSN, list of titles as values.\"\n result = {}\n for row in db.view('publication/published', include_docs=True):\n journal = row.doc.get('journal')\n title = journal.get('title')\n issn = journal.get('issn') or 
''\n        if title:\n            result.setdefault(issn, set()).add(title)\n    return result\n\ndef create_journals(db, journals):\n    \"Create the journal documents.\"\n    for issn, titles in journals.items():\n        if not titles:\n            titles = [issn]\n        for title in titles:\n            with JournalSaver(db=db) as saver:\n                saver['issn'] = issn\n                saver['title'] = title\n\n\nif __name__ == '__main__':\n    args = get_args()\n    utils.load_settings(filepath=args.settings)\n    db = utils.get_db()\n    journals = compile_journals(db)\n    create_journals(db, journals)\n","sub_path":"publications/scripts/compile_journals.py","file_name":"compile_journals.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"372272100","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport sys\n\nfrom arch.MainWindow import MainWindow\nfrom arch.file import File\n\n\ndef check_args(args):\n    d = args.directory\n    if not os.path.exists(d) or not os.path.isdir(d):\n        sys.stderr.write(\"Directory not found\")  # clarified message: the check is for a directory\n        sys.exit(1)\n    max_level = args.depth\n    exclude = list(map(clear_ext, args.exclude.split()))\n    mask = args.mask\n    return d, max_level, exclude, mask\n\n\ndef clear_ext(ext):\n    return ext if ext[0] == '.' else '.' + ext\n\n\ndef start(d, max_level, exclude, mask):\n    f = File(d, 1, exclude, mask)\n    print(list(map(lambda x: str(x), f.files)))\n    app = MainWindow(f, max_level)\n    sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description=\"DiskUsage\")\n    parser.add_argument('--depth', '-d', type=int,\n                        default=float('inf'),\n                        help=\"Depth of finding files and directories\")\n    parser.add_argument('--exclude', '-e', type=str,\n                        default=\"\",\n                        help=\"Exclude extensions\")\n    parser.add_argument('--mask', '-m', type=str, default='*',\n                        help='Mask to filter')\n\n    parser.add_argument('directory', type=str,\n                        default=\"\",\n                        help=\"Path to directory\")\n    args = parser.parse_args()\n    d, max_level, exclude, mask = check_args(args)\n    start(d, max_level, exclude, mask)\n","sub_path":"diskusage.py","file_name":"diskusage.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"647455675","text":"import os\nimport glob\nimport time\nimport datetime\nimport pickle\nimport argparse\nimport socket\n\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n\ncount = 0\ntemps = []\n\n# send payload_pickle via socket\ndef PickleObject(temp, time):\n\tid = 1\n\tpayload = {'id':id, 'temp':temp, 'time':time}\n\tpayload_pickle = pickle.dumps(payload)\n\treturn payload_pickle\n\ndef SendMessage(address, port, temp, time):\n\ts = None  # fixed: if socket() raised before assignment, the finally clause crashed with NameError\n\ttry:\n\t\tsentPickle = PickleObject(temp, time)\n\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\ts.connect((address, int(port)))\n\t\ts.sendall(sentPickle)\n\texcept Exception as e:\n\t\tprint(\"Error sending message: \" + str(e))\n\tfinally:\n\t\tif s is not None:\n\t\t\ts.close()\n\ndef read_temp_raw():\n\tf = open(device_file, 'r')\n\tlines = f.readlines()\n\tf.close()\n\treturn lines\n\n
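# --- Hedged aside, not in the original script: the 'YES' line is the DS18B20's CRC\n# check, and the unbounded wait in read_temp() below can hang if the sensor drops\n# off the bus. A bounded variant (name illustrative):\ndef read_temp_raw_checked(retries=10):\n\tfor _ in range(retries):\n\t\tlines = read_temp_raw()\n\t\tif lines and lines[0].strip()[-3:] == 'YES':\n\t\t\treturn lines\n\t\ttime.sleep(0.2)\n\traise IOError('DS18B20 CRC check failed after %d retries' % retries)\n\n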
def read_temp():\n\tlines = read_temp_raw()\n\twhile lines[0].strip()[-3:] != 'YES':\n\t\ttime.sleep(0.2)\n\t\tlines = read_temp_raw()\n\tequals_pos = lines[1].find('t=')\n\tif equals_pos != -1:\n\t\ttemp_string = lines[1][equals_pos+2:]\n\t\ttemp_c = float(temp_string) / 1000.0\n\t\ttemp_f = temp_c * 9.0 / 5.0 + 32.0\n\t\tprint(\"Temperature Read: \" , temp_f)\n\t\treturn temp_c, temp_f\n\n# Parse command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('-i', required=True)\nparser.add_argument('-p', required=True)\nargs = parser.parse_args()\n\nip_address = args.i\nport = args.p\n\nwhile True:\n\ttemp_c, temp_f = read_temp()\n\tif temp_f == 32:\n\t\tbreak\n\telse:\n\t\ttemps.append(temp_f)\n\t\tcount += 1\n\t\tif count == 5:\n\t\t\ttimeRead = datetime.datetime.now()\n\t\t\ttimeReadStr = timeRead.strftime('%Y/%m/%d %H:%M:%S')\n\t\telif count == 10:\n\t\t\ttempToSend = sum(temps)/len(temps)\n\t\t\tprint(timeReadStr)\n\t\t\tprint(tempToSend)\n\t\t\tdel temps[:]\n\t\t\tSendMessage(ip_address, port, tempToSend, timeReadStr)\n\t\t\tcount = 0\n\t\ttime.sleep(0.5)\n","sub_path":"thermometer_1.py","file_name":"thermometer_1.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"342700094","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 12 20:48:50 2020\r\n\r\n@author: MG\r\n\r\n\"\"\"\r\nimport numpy as np\r\nimport scipy.io as spio # to import matlab\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)\r\n\r\n\r\n# Initial parameters\r\n# Choose the compression ratio according to the data: 14 or 19\r\n\r\nCR=14 # compression ratio, other option CR=19\r\n\r\nSpeed=900 # rpm\r\n\r\nif CR==14:\r\n    Data = spio.loadmat('Data_CR14.mat')\r\nelif CR==19:\r\n    Data = spio.loadmat('Data_CR19.mat')\r\n\r\nP_Test1 = Data ['P_mot'] # import variable from mat file\r\nP_Test2 = Data ['P_adv5']\r\nP_Test3 = Data ['P_adv11']\r\nP_Test4 = Data ['P_adv15']\r\n\r\nP_Test1=np.squeeze(P_Test1) #removes axes with length 1\r\nP_Test2=np.squeeze(P_Test2)\r\nP_Test3=np.squeeze(P_Test3)\r\nP_Test4=np.squeeze(P_Test4)\r\n\r\n\r\n\r\n\r\nSOI_Test1=0 # start of injection, CAD BTDC\r\nSOI_Test2=5\r\nSOI_Test3=11\r\nSOI_Test4=15\r\n\r\n#labels for plots\r\n\r\nlabel_Test1='Motoring'\r\nlabel_Test2='SOI 5'\r\nlabel_Test3='SOI 11'\r\nlabel_Test4='SOI 15'\r\n\r\n#labels for bar plot\r\nlabels_line_all = [label_Test1, label_Test2, label_Test3, label_Test4]\r\nlabels_line_fired = [label_Test2, label_Test3, label_Test4]\r\n\r\n\r\n\r\n\r\n\r\nfrom fc_volume import volume # call function to calculate volume\r\nVd, Vc, Vth, dVth, cad =volume (CR)\r\n\r\n# Create cylinder volume plot\r\n\r\nfig, ax = plt.subplots()\r\nax.grid()\r\nax.set_ylabel('Volume, $cm^3$')\r\nax.set_xlabel('CAD \\n \\n Fig.1. Cylinder Volume During Engine Cycle')\r\n\r\nmaxval=np.max(Vth*10**6)\r\nylim=maxval+50\r\nax.set_xlim(-360, 360)\r\nax.set_ylim(0, ylim)\r\n\r\nax.xaxis.set_major_locator(MultipleLocator(60)) # distribute major ticks on x axis\r\n\r\nline, = ax.plot(cad-360, Vth*10**6, label=('Volume'))\r\n\r\nplt.show()\r\n\r\n# Create cylinder pressure plot\r\nfig, ax = plt.subplots()\r\nax.grid()\r\nax.set_ylabel('Pressure, bar')\r\nax.set_xlabel('CAD \\n \\n Fig.2. 
Pressure changes in cylinder')\r\n\r\nmaxval=np.max([np.max(P_Test1), np.max(P_Test2), np.max(P_Test3), np.max(P_Test4)])\r\nylim=maxval+5\r\nax.set_xlim(-100, 100)\r\nax.set_ylim(0, ylim)\r\nax.xaxis.set_major_locator(MultipleLocator(20)) # distribute major ticks on x axis\r\nline, = ax.plot(cad-360, P_Test1, label=label_Test1)\r\nline, = ax.plot(cad-360, P_Test2, label=label_Test2)\r\nline, = ax.plot(cad-360, P_Test3, label=label_Test3)\r\nline, = ax.plot(cad-360, P_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n\r\n# Create cylinder pressure/volume plot\r\nfig, ax = plt.subplots()\r\nax.grid() # create grid\r\nax.set_ylabel('Pressure, bar')\r\nax.set_xlabel('Volume, $cm^3$ \\n \\n Fig.3. Pressure/volume diagram')\r\n\r\nmaxval=np.max([np.max(P_Test1), np.max(P_Test2), np.max(P_Test3), np.max(P_Test4)])\r\nylim=maxval+5\r\nax.set_xlim(0, 750) # set axis limits\r\nax.set_ylim(0, ylim)\r\nline, = ax.plot(Vth*10**6, P_Test1, label=label_Test1) # create multiple line plots\r\nline, = ax.plot(Vth*10**6, P_Test2, label=label_Test2)\r\nline, = ax.plot(Vth*10**6, P_Test3, label=label_Test3)\r\nline, = ax.plot(Vth*10**6, P_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n# Create cylinder pressure/volume log plot\r\nfig, ax = plt.subplots()\r\nax.grid() # create grid\r\nax.set_ylabel('Pressure, bar')\r\nax.set_xlabel('Volume, $cm^3$ \\n \\n Fig.4. Pressure/volume diagram in log scale')\r\n\r\nmaxval=np.log(np.max([np.max(P_Test1), np.max(P_Test2), np.max(P_Test3), np.max(P_Test4)]))\r\nylim=maxval+5\r\n#ax.set_xlim(0, 750) # set axis limits\r\n#ax.set_ylim(0, ylim)\r\nax.set_xscale('log')\r\nax.set_yscale('log')\r\nax.xaxis.set_major_locator(MultipleLocator(30)) # distribute major ticks on x axis\r\nax.yaxis.set_major_locator(MultipleLocator(10)) # distribute major ticks on y axis\r\nline, = ax.plot((Vth*10**6), (P_Test1), label=label_Test1) # create multiple line plots\r\nline, = ax.plot(Vth*10**6, P_Test2, label=label_Test2)\r\nline, = ax.plot(Vth*10**6, P_Test3, label=label_Test3)\r\nline, = ax.plot(Vth*10**6, P_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n\r\n\r\nfrom fc_maxp import maxp # call function to find maximal pressure and its angle\r\nPMax_Test1, PMax_cad_Test1 =maxp (P_Test1, cad)\r\nPMax_Test2, PMax_cad_Test2 =maxp (P_Test2, cad)\r\nPMax_Test3, PMax_cad_Test3 =maxp (P_Test3, cad)\r\nPMax_Test4, PMax_cad_Test4 =maxp (P_Test4, cad)\r\n\r\nfrom fc_imep import imep # call function to calculate IMEP\r\nIMEP_Test1, IMEP_gross_Test1 =imep (P_Test1, dVth, Vd)\r\nIMEP_Test2, IMEP_gross_Test2 =imep (P_Test2, dVth, Vd)\r\nIMEP_Test3, IMEP_gross_Test3 =imep (P_Test3, dVth, Vd)\r\nIMEP_Test4, IMEP_gross_Test4 =imep (P_Test4, dVth, Vd)\r\n\r\n
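# --- Hedged aside, not in the original script: the bar charts below all repeat the\r\n# same create/annotate/label pattern. A small helper like this sketch could replace\r\n# them; the name, signature and defaults are illustrative only.\r\ndef annotated_bar(values, tick_labels, ylabel, xlabel, colors, pad=0.5, decimals=1):\r\n    x = np.arange(np.size(values))\r\n    fig, ax = plt.subplots()\r\n    plt.bar(x, values, width=0.3, color=colors)\r\n    for index, value in enumerate(x):\r\n        # print the rounded value just above each bar\r\n        plt.text(value, values[index] + pad, str(np.round(values[index], decimals)), horizontalalignment='center')\r\n    ax.set_ylabel(ylabel)\r\n    ax.set_xlabel(xlabel)\r\n    plt.xticks(x, tick_labels)\r\n    plt.show()\r\n\r\n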
# Create bar plot for IMEP_gross\r\nIMEP = [IMEP_gross_Test1, IMEP_gross_Test2, IMEP_gross_Test3, IMEP_gross_Test4]\r\nlabels = ['Mot', 'Adv5', 'Adv11', 'Adv15']\r\nx = np.arange(np.size(IMEP)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, IMEP,width=0.3,color=['C0','C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n    plt.text(value, (IMEP[index]+0.2), str(np.round(IMEP[index],2)),horizontalalignment='center')\r\n\r\nax.set_ylabel('IMEP_{gross}, bar')\r\nax.set_xlabel('Test Mode \\n \\n Fig.5. Indicated Mean Effective Pressure, Gross')\r\n\r\nylim=np.round((np.max(IMEP)+1),2)\r\nax.set_ylim(-1, ylim)\r\nplt.xticks(x, labels)\r\nplt.show()\r\n\r\n# Create bar plot for Pmax\r\nPmax = [PMax_Test1, PMax_Test2, PMax_Test3, PMax_Test4]\r\n\r\nx = np.arange(np.size(Pmax)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, Pmax,width=0.3, color=['C0','C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n    plt.text(value, (Pmax[index]+2), str(np.round(Pmax[index],1)),horizontalalignment='center')\r\n\r\nax.set_ylabel('Pressure, bar')\r\nax.set_xlabel('Test Mode \\n \\n Fig.6. Maximal Pressure')\r\n\r\nylim=np.round((np.max(Pmax)+10),2)\r\nax.set_ylim(0, ylim)\r\nplt.xticks(x, labels_line_all)\r\nplt.show()\r\n\r\n# Create bar plot for Pmax cad\r\nPmax_cad = [PMax_cad_Test2, PMax_cad_Test3, PMax_cad_Test4]\r\n\r\nx = np.arange(np.size(Pmax_cad)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, Pmax_cad,width=0.3, color=['C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n    plt.text(value, (Pmax_cad[index]+0.5), str(np.round(Pmax_cad[index],1)),horizontalalignment='center')\r\n\r\nax.set_ylabel('CAD, ATDC')\r\nax.set_xlabel('Test Mode \\n \\n Fig.7. Maximal Pressure Angle')\r\n\r\nylim=np.round((np.max(Pmax_cad)+1),2)\r\nax.set_ylim(0, ylim)\r\nplt.xticks(x, labels_line_fired)\r\nplt.show()\r\n\r\n# Calculation of the average temperature in the cylinder\r\n\r\nfrom fc_Tcyl import Tcyl # call function to calculate the average cylinder temperature\r\n\r\nTcyl_Test1, cad_st= Tcyl (P_Test1, CR, Speed)\r\nTcyl_Test2, cad_st= Tcyl (P_Test2, CR, Speed)\r\nTcyl_Test3, cad_st= Tcyl (P_Test3, CR, Speed)\r\nTcyl_Test4, cad_st= Tcyl (P_Test4, CR, Speed)\r\n\r\n# Create average temperature in cylinder plot\r\nfig, ax = plt.subplots()\r\nax.grid() # create grid\r\nax.set_ylabel('Temperature, K')\r\nax.set_xlabel('CAD \\n \\n Fig.8. Average Temperature in Cylinder')\r\n\r\nmaxval=np.max([np.max(Tcyl_Test1), np.max(Tcyl_Test2), np.max(Tcyl_Test3), np.max(Tcyl_Test4)])\r\nylim=maxval+120\r\nax.set_xlim(-80, 120) # set axis limits\r\nax.set_ylim(300, ylim)\r\nax.xaxis.set_major_locator(MultipleLocator(20)) # distribute major ticks on x axis\r\nax.yaxis.set_major_locator(MultipleLocator(100)) # distribute major ticks on y axis\r\nline, = ax.plot(cad_st-360, Tcyl_Test1, label=label_Test1) # create multiple line plots\r\nline, = ax.plot(cad_st-360, Tcyl_Test2, label=label_Test2)\r\nline, = ax.plot(cad_st-360, Tcyl_Test3, label=label_Test3)\r\nline, = ax.plot(cad_st-360, Tcyl_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n# Calculate burn rate\r\n\r\n\r\nfrom fc_dQ import dQ # call function to calculate combustion parameters\r\n\r\ndQ_Test1, Q_Test1, Qr_Test1, IGN_Delay_Test1, HR10_Test1, HR1050_Test1, HR1090_Test1, HR50cad_Test1, cad_s= dQ (P_Test1, CR, SOI_Test1)\r\ndQ_Test2, Q_Test2, Qr_Test2, IGN_Delay_Test2, HR10_Test2, HR1050_Test2, HR1090_Test2, HR50cad_Test2, cad_s= dQ (P_Test2, CR, SOI_Test2)\r\ndQ_Test3, Q_Test3, Qr_Test3, IGN_Delay_Test3, HR10_Test3, HR1050_Test3, HR1090_Test3, HR50cad_Test3, cad_s= dQ (P_Test3, CR, SOI_Test3)\r\ndQ_Test4, Q_Test4, Qr_Test4, IGN_Delay_Test4, HR10_Test4, HR1050_Test4, HR1090_Test4, HR50cad_Test4, cad_s= dQ (P_Test4, CR, SOI_Test4)\r\n\r\n
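# --- Hedged note (standard single-zone relation, not extracted from fc_dQ itself): the\r\n# apparent heat release rate is commonly computed as\r\n#   dQ/dtheta = gamma/(gamma-1) * p * dV/dtheta + 1/(gamma-1) * V * dp/dtheta\r\n# A minimal sketch with an assumed constant gamma:\r\ndef ahrr_single_zone(p, V, dV, dp, gamma=1.35):\r\n    # p: pressure, V: volume; dV and dp are per-CAD derivatives; gamma assumed constant\r\n    return gamma / (gamma - 1.0) * p * dV + 1.0 / (gamma - 1.0) * V * dp\r\n\r\n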
Apparent Heat Release Rate')\r\n\r\nmaxval=np.max([np.max(dQ_Test1), np.max(dQ_Test2), np.max(dQ_Test3), np.max(dQ_Test4)])\r\nylim=maxval+5\r\nax.set_xlim(-20, 80) #set axis limits\r\nax.set_ylim(-3, ylim)\r\nax.xaxis.set_major_locator(MultipleLocator(10)) # distribute major ticks on x axis\r\nax.yaxis.set_major_locator(MultipleLocator(5)) # distribute major ticks on y axis\r\n\r\n\r\nline, = ax.plot(cad_s-360, dQ_Test1, label=label_Test1) #create multiple line plots\r\nline, = ax.plot(cad_s-360, dQ_Test2, label=label_Test2)\r\nline, = ax.plot(cad_s-360, dQ_Test3, label=label_Test3)\r\nline, = ax.plot(cad_s-360, dQ_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n# Create apparent heat release plot\r\nfig, ax = plt.subplots()\r\nax.grid() #create grid\r\nax.set_ylabel('AHR, J')\r\nax.set_xlabel('CAD \\n \\n Fig.10. Apparent Heat Release')\r\n\r\nmaxval=np.max([np.max(Q_Test1), np.max(Q_Test2), np.max(Q_Test3), np.max(Q_Test4)])\r\nylim=maxval+40\r\nax.set_xlim(-20, 80) #set axis limits\r\nax.set_ylim(-60, ylim)\r\nax.xaxis.set_major_locator(MultipleLocator(10)) # distribute major ticks on x axis\r\nax.yaxis.set_major_locator(MultipleLocator(100)) # distribute major ticks on y axis\r\nline, = ax.plot(cad_s-360, Q_Test1, label=label_Test1) #create multiple line plots\r\nline, = ax.plot(cad_s-360, Q_Test2, label=label_Test2)\r\nline, = ax.plot(cad_s-360, Q_Test3, label=label_Test3)\r\nline, = ax.plot(cad_s-360, Q_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n# Create relative apparent heat release plot\r\nfig, ax = plt.subplots()\r\nax.grid() #create grid\r\nax.set_ylabel('AHR, %')\r\nax.set_xlabel('CAD \\n \\n Fig.11. Apparent Relative Heat Release')\r\n\r\nax.set_xlim(-20, 80) #set axis limits\r\nax.set_ylim(-10, 105)\r\nax.xaxis.set_major_locator(MultipleLocator(10)) # distribute major ticks on x axis\r\nax.yaxis.set_major_locator(MultipleLocator(10)) # distribute major ticks on y axis\r\nline, = ax.plot(cad_s-360, Qr_Test1, label=label_Test1) #create multiple line plots\r\nline, = ax.plot(cad_s-360, Qr_Test2, label=label_Test2)\r\nline, = ax.plot(cad_s-360, Qr_Test3, label=label_Test3)\r\nline, = ax.plot(cad_s-360, Qr_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n# Create bar plot for Ignition Delay\r\nIgnition_Delay = [IGN_Delay_Test2, IGN_Delay_Test3, IGN_Delay_Test4]\r\n\r\nx = np.arange(np.size(Ignition_Delay)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, Ignition_Delay,width=0.3,color=['C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n plt.text(value, (Ignition_Delay[index]+0.5), str(np.round(Ignition_Delay[index],1)),horizontalalignment='center')\r\n \r\nax.set_ylabel('CAD')\r\nax.set_xlabel('Test Mode \\n \\n Fig.12. Ignition Delay')\r\n\r\nylim=np.round((np.max(Ignition_Delay)+1),2)\r\nax.set_ylim(0, ylim)\r\nplt.xticks(x, labels_line_fired)\r\nplt.show()\r\n\r\n# Create zoomed apparent heat release rate plot\r\nfig, ax = plt.subplots()\r\nax.grid() #create grid\r\nax.set_ylabel('AHRR, J/deg')\r\nax.set_xlabel('CAD \\n \\n Fig.13. 
Apparent Heat Release Rate')\r\n\r\nmaxval=np.max([np.max(dQ_Test1), np.max(dQ_Test2), np.max(dQ_Test3), np.max(dQ_Test4)])\r\nylim=maxval+5\r\nax.set_xlim(-18, 2) #set axis limits\r\nax.set_ylim(-3, 2)\r\nax.xaxis.set_major_locator(MultipleLocator(1)) # distribute major ticks on x axis\r\nax.yaxis.set_major_locator(MultipleLocator(0.5)) # distribute major ticks on y axis\r\nline, = ax.plot(cad_s-360, dQ_Test1, label=label_Test1) #create multiple line plots\r\n\r\n# automatic SOI annotations\r\nx_Test2=-SOI_Test2\r\ny_Test2=dQ_Test2[int(200-SOI_Test2*10)] # list index must be an integer\r\n\r\nx_Test3=-SOI_Test3\r\ny_Test3=dQ_Test3[int(200-SOI_Test3*10)]\r\n\r\nx_Test4=-SOI_Test4\r\ny_Test4=dQ_Test4[int(200-SOI_Test4*10)]\r\n\r\narrow_SOI_Test2 = ax.annotate('SOI ' + str(SOI_Test2), xy=(x_Test2, y_Test2), xytext=(x_Test2 -1.5, y_Test2 + 1),\r\n arrowprops=dict(arrowstyle = '->', connectionstyle = 'arc3',facecolor='orange'))\r\narrow_SOI_Test3 = ax.annotate('SOI ' + str(SOI_Test3), xy=(x_Test3, y_Test3), xytext=(x_Test3 -1.5, y_Test3 + 1),\r\n arrowprops=dict(arrowstyle = '->', connectionstyle = 'arc3',facecolor='green'))\r\narrow_SOI_Test4 = ax.annotate('SOI ' + str(SOI_Test4), xy=(x_Test4, y_Test4), xytext=(x_Test4 -1.5, y_Test4 + 1),\r\n arrowprops=dict(arrowstyle = '->', connectionstyle = 'arc3',facecolor='red'))\r\n\r\nline, = ax.plot(cad_s-360, dQ_Test2, label=label_Test2)\r\nline, = ax.plot(cad_s-360, dQ_Test3, label=label_Test3)\r\nline, = ax.plot(cad_s-360, dQ_Test4, label=label_Test4)\r\nax.legend()\r\nplt.show()\r\n\r\n# Create bar plot for 50% AHR\r\nHR50cad = [HR50cad_Test2, HR50cad_Test3, HR50cad_Test4]\r\n\r\nx = np.arange(np.size(HR50cad)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, HR50cad,width=0.3,color=['C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n plt.text(value, (HR50cad[index]+1), str(np.round(HR50cad[index],1)),horizontalalignment='center')\r\n \r\nax.set_ylabel('CAD')\r\nax.set_xlabel('Test Mode \\n \\n Fig.14. Angle of 50% AHR')\r\n\r\nylim=np.round((np.max(HR50cad)+3),2)\r\nax.set_ylim(0, ylim)\r\nplt.xticks(x, labels_line_fired)\r\nplt.show()\r\n\r\n# Create bar plot for duration of 10% AHR\r\nHR10 = [HR10_Test2, HR10_Test3, HR10_Test4]\r\n\r\nx = np.arange(np.size(HR10)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, HR10,width=0.3,color=['C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n plt.text(value, (HR10[index]+1), str(np.round(HR10[index],1)),horizontalalignment='center')\r\n \r\nax.set_ylabel('CAD')\r\nax.set_xlabel('Test Mode \\n \\n Fig.15. Duration of 10% AHR')\r\n\r\nylim=np.round((np.max(HR10)+3),2)\r\nax.set_ylim(0, ylim)\r\nplt.xticks(x, labels_line_fired)\r\nplt.show()\r\n\r\n# Create bar plot for duration of 10% - 50% AHR\r\nHR1050 = [HR1050_Test2, HR1050_Test3, HR1050_Test4]\r\n\r\nx = np.arange(np.size(HR1050)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, HR1050,width=0.3,color=['C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n plt.text(value, (HR1050[index]+1), str(np.round(HR1050[index],1)),horizontalalignment='center')\r\n \r\nax.set_ylabel('CAD')\r\nax.set_xlabel('Test Mode \\n \\n Fig.16. 
Duration of 10-50% AHR')\r\n\r\nylim=np.round((np.max(HR1050)+3),2)\r\nax.set_ylim(0, ylim)\r\nplt.xticks(x, labels_line_fired)\r\nplt.show()\r\n\r\n# Create bar plot for duration of 10% - 90% AHR\r\nHR1090 = [HR1090_Test2,HR1090_Test3,HR1090_Test4]\r\n\r\nx = np.arange(np.size(HR1090)) # creates array for bar order\r\nfig, ax = plt.subplots()\r\nplt.bar(x, HR1090,width=0.3,color=['C1', 'C2', 'C3'])\r\n\r\nfor index, value in enumerate(x):\r\n plt.text(value, (HR1090[index]+1), str(np.round(HR1090[index],1)),horizontalalignment='center')\r\n \r\nax.set_ylabel('CAD')\r\nax.set_xlabel('Test Mode \\n \\n Fig.17. Duration of 10-90% AHR')\r\n\r\nylim=np.round((np.max(HR1090)+3),2)\r\nax.set_ylim(0, ylim)\r\nplt.xticks(x, labels_line_fired)\r\nplt.show()\r\n\r\n\r\n\r\n","sub_path":"Sc1.py","file_name":"Sc1.py","file_ext":"py","file_size_in_byte":14928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"210773124","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, division\nimport argparse\nimport caffe\nimport cv2\nimport numpy as np\nimport os\nfrom os.path import exists, join, split, splitext\nimport shutil\nimport json\n\nimport matplotlib.pyplot as plt\n# import network\n# import util\n\n__author__ = 'Soonmin Hwang'\n__email__ = 'smhwang@rcv.kaist.ac.kr'\n__description__ = 'This code is a modified version of F.Yus implementation. \\\n (https://github.com/fyu/dilated.git) '\n\n\n# def read_array(filename):\n# with open(filename, 'rb') as fp:\n# type_code = np.fromstring(fp.read(4), dtype=np.int32)\n# shape_size = np.fromstring(fp.read(4), dtype=np.int32)\n# shape = np.fromstring(fp.read(4 * shape_size), dtype=np.int32)\n# if type_code == cv2.CV_32F:\n# dtype = np.float32\n# if type_code == cv2.CV_64F:\n# dtype = np.float64\n# return np.fromstring(fp.read(), dtype=dtype).reshape(shape)\n\n\n# def write_array(filename, array):\n# with open(filename, 'wb') as fp:\n# if array.dtype == np.float32:\n# typecode = cv2.CV_32F\n# elif array.dtype == np.float64:\n# typecode = cv2.CV_64F\n# else:\n# raise ValueError(\"type is not supported\")\n# fp.write(np.array(typecode, dtype=np.int32).tostring())\n# fp.write(np.array(len(array.shape), dtype=np.int32).tostring())\n# fp.write(np.array(array.shape, dtype=np.int32).tostring())\n# fp.write(array.tostring())\n\n\ndef test_image(options):\n\n # label_margin = 186\n\n if options.gpu >= 0:\n caffe.set_mode_gpu()\n caffe.set_device(options.gpu)\n print('Using GPU ', options.gpu)\n else:\n caffe.set_mode_cpu()\n print('Using CPU')\n\n mean_pixel = np.array(options.mean, dtype=np.float32)\n net = caffe.Net(options.deploy_net, options.weights, caffe.TEST)\n\n image_paths = [line.strip() for line in open(options.image_list, 'r')]\n image_names = [split(p)[1] for p in image_paths]\n input_dims = list(net.blobs['data'].shape)\n\n assert input_dims[0] == 1\n batch_size, num_channels, input_height, input_width = input_dims\n print('Input size:', input_dims)\n caffe_in = np.zeros(input_dims, dtype=np.float32)\n\n output_height = input_height\n output_width = input_width\n\n result_list = []\n feat_list = []\n\n\n with open(options.info, 'r') as fp:\n info = json.load(fp)\n palette = np.array(info['palette'], dtype=np.uint8)\n\n\n for i in range(len(image_names)):\n print('Predicting', image_names[i])\n image_ori = cv2.imread(image_paths[i]).astype(np.float32) - mean_pixel \n image_size = image_ori.shape \n print('Image size:', image_size)\n\n image = 
cv2.resize(image_ori, (input_dims[3], input_dims[2]), interpolation = cv2.INTER_CUBIC) # cv2.resize takes (width, height); input_dims is (batch, channels, height, width)\n\n caffe_in[0] = image.transpose([2, 0, 1])\n out = net.forward_all(blobs=[], **{net.inputs[0]: caffe_in})\n prob = out['pred'][0]\n\n # image = cv2.copyMakeBorder(image, label_margin, label_margin,\n # label_margin, label_margin,\n # cv2.BORDER_REFLECT_101)\n # num_tiles_h = input_height // output_height + \\\n # (1 if image_size[0] % output_height else 0)\n # num_tiles_w = input_width // output_width + \\\n # (1 if image_size[1] % output_width else 0)\n # prediction = []\n # feat = []\n # for h in range(num_tiles_h):\n # col_prediction = []\n # col_feat = []\n # for w in range(num_tiles_w):\n # offset = [output_height * h,\n # output_width * w]\n # tile = image[offset[0]:offset[0] + input_height,\n # offset[1]:offset[1] + input_width, :]\n # margin = [0, input_height - tile.shape[0],\n # 0, input_width - tile.shape[1]]\n # # tile = cv2.copyMakeBorder(tile, margin[0], margin[1],\n # # margin[2], margin[3],\n # # cv2.BORDER_REFLECT_101)\n\n # caffe_in[0] = tile.transpose([2, 0, 1])\n # blobs = []\n # out = net.forward_all(blobs=blobs, **{net.inputs[0]: caffe_in})\n # prob = out['pred'][0]\n # col_prediction.append(prob)\n # col_prediction = np.concatenate(col_prediction, axis=2)\n # prediction.append(col_prediction)\n # prob = np.concatenate(prediction, axis=1)\n\n # zoom_prob = prob[:, :image_size[0], :image_size[1]]\n # prediction = np.argmax(zoom_prob.transpose([1, 2, 0]), axis=2)\n prediction = np.argmax(prob.transpose([1, 2, 0]), axis=2)\n prediction = cv2.resize(prediction, (image_size[1], image_size[0]), interpolation=cv2.INTER_NEAREST)\n \n from PIL import PngImagePlugin, Image\n \n\n\n out_path = join(options.result_dir,\n splitext(image_names[i])[0] + '.png')\n print('Writing', out_path)\n # cv2.imwrite(out_path, prediction) \n\n im = Image.fromarray(prediction.astype(np.uint8), mode='P')\n im.putpalette(palette.flatten())\n # im.info['palette'] = palette\n im.save(out_path)\n\n # meta = PngImagePlugin.PngInfo()\n # reserved = ('interlace', 'gamma', 'dpi', 'transparency', 'aspect')\n # for k, v, in im.info.iteritems():\n # if k in reserved: continue\n # meta.add_text(k, v, 0) \n # im.save(out_path, \"PNG\", pnginfo=meta)\n\n\n # out_path = join(options.result_dir,\n # 'seg_' + splitext(image_names[i])[0] + '.png')\n\n # color_image = palette[prediction.ravel()].reshape(image_size) \n # color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)\n\n # color_image = color_image * 0.3 + image_ori * 0.7\n # cv2.imwrite(out_path, color_image)\n\n # import ipdb\n # ipdb.set_trace()\n\n print('================================')\n print('All results are generated.')\n print('================================')\n\n # result_list_path = join(options.result_dir, 'results.txt')\n # print('Writing', result_list_path)\n # with open(result_list_path, 'w') as fp:\n # fp.write('\\n'.join(result_list))\n # if options.bin:\n # feat_list_path = join(options.feat_dir, 'feats.txt')\n # print('Writing', feat_list_path)\n # with open(feat_list_path, 'w') as fp:\n # fp.write('\\n'.join(feat_list))\n\n\ndef process_options(options):\n assert exists(options.image_list), options.image_list + ' does not exist'\n assert exists(options.weights), options.weights + ' does not exist'\n \n work_dir = options.work_dir\n model = options.model\n\n if not exists(work_dir):\n print('Creating working directory', work_dir)\n os.makedirs(work_dir) # the working directory must exist before copying into it\n\n assert exists(options.deploy_net), options.deploy_net + ' does not exist'\n shutil.copy(options.deploy_net, join(work_dir, 'deploy.prototxt'))\n\n options.result_dir = join(work_dir, 
'results', options.sub_dir)\n\n if not exists(options.result_dir):\n print('Creating', options.result_dir)\n os.makedirs(options.result_dir)\n \n return options\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', default='pspnet101', choices=['pspnet101'])\n parser.add_argument('--work_dir', default='jobs/pascal_voc/pspnet101_init',\n help='Working directory')\n parser.add_argument('--sub_dir', default='',\n help='Subdirectory to store the model testing results. '\n 'For example, if it is set to \"val\", the testing '\n 'results will be saved in <work_dir>/results/val/ '\n 'folder. By default, the results are saved in '\n '<work_dir>/results/ directly.')\n parser.add_argument('--image_list', required=True,\n help='List of images to test on. This is required '\n 'for context module to deal with variable image '\n 'size.')\n parser.add_argument('--weights', required=True)\n parser.add_argument('--deploy_net', required=True)\n parser.add_argument('--info', required=True)\n parser.add_argument('--mean', nargs='*', default=[104.008, 116.669, 122.675], type=float,\n help='Mean pixel value (BGR) for the dataset.\\n'\n 'Default is the mean pixel of PASCAL dataset.')\n parser.add_argument('--classes', type=int, required=True,\n help='Number of categories in the data')\n parser.add_argument('--gpu', type=int, default=0,\n help='GPU for testing. If it is less than 0, '\n 'CPU is used instead.')\n \n options = process_options(parser.parse_args()) \n test_image(options)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pspnet/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":9185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"60365970","text":"from django import forms\n\nfrom .models import MFUser, MemberField, JobExperience\nfrom fields.models import Field\n\n\nclass JoinForm(forms.ModelForm):\n\n class Meta:\n model = MFUser\n fields = ('first_name',\n 'last_name',\n 'username',\n 'password',\n 'email',\n )\n\n\nclass AddFieldForm(forms.Form):\n\n name = forms.ModelChoiceField(queryset=Field.objects.all())\n mentor = forms.ChoiceField(choices=((False, 'No'), (True, 'Yes')))\n\n def __init__(self, *args, **kwargs):\n member = kwargs.pop('user')\n members_fields = MemberField.objects.filter(member=member.id)\n # item.field.id is already the Field pk, so no extra lookup is needed\n remove_list = [item.field.id for item in members_fields]\n super(AddFieldForm, self).__init__(*args, **kwargs)\n if member:\n self.fields['name'].queryset = \\\n Field.objects.all().exclude(id__in=remove_list)\n\n def clean(self):\n name = self.cleaned_data['name']\n mentor = self.cleaned_data['mentor']\n if not name:\n raise forms.ValidationError('No field entered.')\n if not mentor:\n raise forms.ValidationError('Mentorship field not answered.')\n return self.cleaned_data\n\n\nclass IntroForm(forms.Form):\n widget = forms.widgets.Textarea(attrs={'class': 'span6'})\n intro_entry = forms.CharField(max_length=6000, widget=widget)\n\n\nclass AddJobForm(forms.ModelForm):\n\n class Meta:\n model = JobExperience\n fields = ('company', 'start_date', 'end_date', 'job_summary')\n\n\n","sub_path":"MentorFinder2/members/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51779423","text":"import argparse\r\nfrom nltk.stem import PorterStemmer\r\nfrom 
nltk.tokenize import word_tokenize\r\nimport data\r\n\r\n\r\n# Set up CLI Arguments\r\nparser = argparse.ArgumentParser()\r\n\r\n# Arguments\r\nrequired = parser.add_argument_group('required arguments')\r\nrequired.add_argument(\"-d\", \"--data\", required=True, choices=[\"student\", \"twitter\", \"research\"], help=\"student/twitter/research.\") # restrict values so 'text' below is always defined\r\n\r\nargs = parser.parse_args()\r\n\r\nif args.data == 'student':\r\n text = data.student_course_feedback\r\nelif args.data == 'twitter':\r\n text = data.twitter\r\nelif args.data == 'research':\r\n text = data.research_paper\r\n\r\n# Tokenize the data\r\ntoken_data = word_tokenize(text)\r\n\r\nstemmer = PorterStemmer()\r\n\r\n# Print the stem of each word\r\nfor word in token_data:\r\n\trootWord = stemmer.stem(word)\r\n\tprint(\"Stem word for {} is {}\".format(word, rootWord))","sub_path":"stemmer.py","file_name":"stemmer.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"326086496","text":"######## Object Detection and Counting function #########\r\n#\r\n# Author: Teng Yang Yu\r\n# Date: 2020/07/17\r\n# Description: \r\n# This program uses a TensorFlow-trained classifier to perform object detection\r\n# and counts the detected objects in each frame of the video.\r\n# Model from https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md\r\n# Label map from https://github.com/tensorflow/models\r\n\r\n# Import packages\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport sys\r\n\r\n# This is needed since the notebook is stored in the object_detection folder.\r\nsys.path.append(\"..\")\r\n\r\n# Import utilities\r\nfrom utils import label_map_util\r\nfrom utils import visualization_utils_count as vis_util\r\n\r\n# Name of the directory containing the object detection module we're using\r\nMODEL_NAME = 'faster_rcnn_inception_v2_coco_2018_01_28'\r\nVIDEO_NAME = 'video/hightway.mp4'\r\ntotal_passed_vehicle = 0 \r\n# Grab path to current working directory\r\nCWD_PATH = os.getcwd()\r\n\r\n# Path to frozen detection graph .pb file, which contains the model that is used\r\n# for object detection.\r\nPATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')\r\n\r\n# Path to label map file\r\nPATH_TO_LABELS = os.path.join(CWD_PATH,'data','mscoco_label_map.pbtxt')\r\n\r\n# Path to video\r\nPATH_TO_VIDEO = os.path.join(CWD_PATH,VIDEO_NAME)\r\n\r\n# Number of classes the object detector can identify\r\nNUM_CLASSES = 90\r\n\r\n# Load the label map.\r\n# Label maps map indices to category names, so that when our convolution\r\n# network predicts `5`, we know that this corresponds to `king`.\r\n# Here we use internal utility functions, but anything that returns a\r\n# dictionary mapping integers to appropriate string labels would be fine\r\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\r\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\r\ncategory_index = label_map_util.create_category_index(categories)\r\n\r\n\r\n# Load the Tensorflow model into memory.\r\ndetection_graph = tf.Graph()\r\nwith detection_graph.as_default():\r\n od_graph_def = tf.GraphDef()\r\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\r\n serialized_graph = fid.read()\r\n od_graph_def.ParseFromString(serialized_graph)\r\n tf.import_graph_def(od_graph_def, name='')\r\n\r\n sess = tf.Session(graph=detection_graph)\r\n\r\n# Define input and output tensors (i.e. 
data) for the object detection classifier\r\n\r\n# Input tensor is the image\r\nimage_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\r\n\r\n# Output tensors are the detection boxes, scores, and classes\r\n# Each box represents a part of the image where a particular object was detected\r\ndetection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\r\n\r\n# Each score represents level of confidence for each of the objects.\r\n# The score is shown on the result image, together with the class label.\r\ndetection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\r\ndetection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\r\n\r\n# Number of objects detected\r\nnum_detections = detection_graph.get_tensor_by_name('num_detections:0')\r\n\r\n# Open video file\r\nvideo = cv2.VideoCapture(PATH_TO_VIDEO)\r\nheight = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\r\nwidth = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\r\nwhile(video.isOpened()):\r\n\r\n # Acquire frame and expand frame dimensions to have shape: [1, None, None, 3]\r\n # i.e. a single-column array, where each item in the column has the pixel RGB value\r\n ret, frame = video.read()\r\n if not ret:\r\n break # stop when the video ends and read() returns no frame\r\n input_frame = frame\r\n # frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\r\n frame_expanded = np.expand_dims(input_frame, axis=0)\r\n\r\n # Perform the actual detection by running the model with the image as input\r\n (boxes, scores, classes, num) = sess.run(\r\n [detection_boxes, detection_scores, detection_classes, num_detections],\r\n feed_dict={image_tensor: frame_expanded})\r\n\r\n # Draw the results of the detection (aka 'visualize the results')\r\n # Visualization of the results of a detection. \r\n is_color_recognition_enabled = 1 # set it to 1 for enabling the color prediction for the detected objects\r\n roi = int(height*0.7) # roi line position (y coordinate of the counting line)\r\n deviation = 2 # the constant that represents the object counting area\r\n counter, csv_line, counting_mode = vis_util.visualize_boxes_and_labels_on_image_array_y_axis(\r\n video.get(1),\r\n input_frame,\r\n 2,\r\n is_color_recognition_enabled,\r\n np.squeeze(boxes),\r\n np.squeeze(classes).astype(np.int32),\r\n np.squeeze(scores),\r\n category_index,\r\n y_reference = roi,\r\n deviation = deviation,\r\n use_normalized_coordinates=True,\r\n line_thickness=4)\r\n\r\n # when a vehicle passes over the line and is counted, make the ROI line green\r\n if counter == 1: \r\n cv2.line(input_frame, (int(width/2), roi), (width, roi), (0, 0xFF, 0), 5)\r\n else:\r\n cv2.line(input_frame, (int(width/2), roi), (width, roi), (0, 0, 0xFF), 5)\r\n \r\n total_passed_vehicle += counter\r\n\r\n # insert information text into the video frame\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n cv2.putText(\r\n input_frame,\r\n 'Detected Vehicles: ' + str(total_passed_vehicle),\r\n (10, 35),\r\n font,\r\n 0.8,\r\n (0, 0xFF, 0xFF),\r\n 2,\r\n cv2.LINE_AA, # the last argument is the lineType, not a font\r\n ) \r\n \r\n cv2.putText(\r\n input_frame,\r\n 'ROI Line',\r\n (545, roi-10),\r\n font,\r\n 0.6,\r\n (0, 0, 0xFF),\r\n 2,\r\n cv2.LINE_AA,\r\n )\r\n\r\n if(len(counting_mode) == 0):\r\n cv2.putText(input_frame, \"...\", (10, 55), font, 0.8, (0,255,255),2,cv2.LINE_AA) \r\n else:\r\n cv2.putText(input_frame, counting_mode, (10, 55), font, 0.8, (0,255,255),2,cv2.LINE_AA)\r\n \r\n cv2.imshow('object counting',input_frame)\r\n\r\n\r\n # Press 'q' to quit\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\n# Clean 
up\r\nvideo.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"105726636","text":"\"\"\"\neasy to miss these edge cases:\n\n[[0]]: return 0\n\n[[1]]: return -1\n\n[[2]]: return 0\n\n[[2, 1, 1], [0, 0, 0], [1, 1, 1]]: return -1\n\n\n# time/space O(m*n)\n# edge case: grid is initially all rotten or all good\n\"\"\"\n\n\nimport collections\n\nclass Solution(object):\n def orangesRotting(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n if not grid or not grid[0]: # return -1 or 0 ? discuss with interviewer\n return 0\n m, n = len(grid), len(grid[0])\n queue = collections.deque()\n for i in range(m):\n for j in range(n):\n if grid[i][j] == 2:\n queue.append((i, j))\n day = -1 # starting at day = 0 here would over-count by one\n while queue:\n for _ in range(len(queue)):\n i, j = queue.popleft()\n for dx, dy in [(1, 0), (-1, 0), (0, 1), (0, -1)]:\n p, q = i + dx, j + dy\n if 0 <= p < m and 0 <= q < n and grid[p][q] == 1:\n grid[p][q] = 2\n queue.append((p, q))\n day += 1\n for i in range(m): # easy to forget this final check. Edge case: [[2, 1, 1], [0, 0, 0], [1, 1, 1]]\n for j in range(n):\n if grid[i][j] == 1:\n return -1\n return max(day, 0) # easy to miss max(), when the grid is all 0\n\n\n\"\"\"\nIn a given grid, each cell can have one of three values:\n\nthe value 0 representing an empty cell;\nthe value 1 representing a fresh orange;\nthe value 2 representing a rotten orange.\nEvery minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.\n\nReturn the minimum number of minutes that must elapse until no cell has a fresh orange. If this is impossible, return -1 instead.\n\n \n\nExample 1:\n\n\n\nInput: [[2,1,1],[1,1,0],[0,1,1]]\nOutput: 4\nExample 2:\n\nInput: [[2,1,1],[0,1,1],[1,0,1]]\nOutput: -1\nExplanation: The orange in the bottom left corner (row 2, column 0) is never rotten, because rotting only happens 4-directionally.\nExample 3:\n\nInput: [[0,2]]\nOutput: 0\nExplanation: Since there are already no fresh oranges at minute 0, the answer is just 0.\n \n\nNote:\n\n1 <= grid.length <= 10\n1 <= grid[0].length <= 10\ngrid[i][j] is only 0, 1, or 2.\n\"\"\"\n","sub_path":"0994. Rotting Oranges.py","file_name":"0994. 
Rotting Oranges.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"543271219","text":"from pgmpy.models import DynamicBayesianNetwork as DBN\nfrom pgmpy.factors.discrete import TabularCPD\nfrom pgmpy.inference import DBNInference\n\n\ndef buildDBN():\n # Construct a DBN object\n dbn = DBN()\n dbn.add_edges_from([(('L', 0), ('O', 0)),\n (('L', 0), ('L', 1)),\n (('L', 1), ('O', 1))])\n\n\n # setup conditional probability tables for nodes in network\n\n O0_cpd = TabularCPD(('O', 0), 4, [[0.7, 0.1, 0.1, 0.1], # A\n [0.1, 0.7, 0.1, 0.1], # B\n [0.1, 0.1, 0.7, 0.1], # C\n [0.1, 0.1, 0.1, 0.7]], # D\n evidence=[('L', 0)], evidence_card=[4])\n\n l0_cpd = TabularCPD(('L', 0), 4, [[0], # A\n [0], # B\n [1], # C\n [0]]) # D\n\n l1_cpd = TabularCPD(('L', 1), 4, [[0.5, 0.0, 0.5, 0.0], # A\n [0.5, 0.5, 0.0, 0.0], # B\n [0.0, 0.0, 0.5, 0.5], # C\n [0.0, 0.5, 0.0, 0.5]], # D\n evidence=[('L', 0)], evidence_card=[4])\n\n #add these conditional probability tables to our BayesianModel\n dbn.add_cpds(l0_cpd, l1_cpd, O0_cpd)\n\n #initialize our model for time series analysis\n dbn.initialize_initial_state()\n\n # Create an inference object to perform queries\n dbn_inf = DBNInference(dbn)\n\n\n #print(dbn_inf.query(variables=[('L',1)], evidence={('O',1): 2})[('L',1)])\n\n\n return dbn_inf\n\n\nbuildDBN()\n","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"541534047","text":"import pkg_resources\nfrom sqlalchemy import create_engine,update\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom AGdb.rds_config import db_username,db_password,db_endpoint,db_port,db_name\n#from .rds_config import db_username,db_password,db_endpoint,db_port,db_name\nfrom AGdb.create_tables import Client,Camera,Client_Cameras,Stream,Stream_Details,Stream_MetaData,\\\n Stream_Details_Raw,Stream_Details_TS,Analytics_MetaData\nimport json\nimport datetime\nfrom sqlalchemy.dialects import mysql\nfrom sqlalchemy import func\nfrom sqlalchemy.sql.functions import coalesce\n\nclass Object(object):\n pass\n\n\nclass database:\n def __init__(self,id=None):\n Base = declarative_base()\n\n connection_string = \"mysql://\"+db_username+':'+db_password+ '@' +\\\n db_endpoint+':'+ str(db_port) +'/' + db_name\n\n engine = create_engine(connection_string)\n Base.metadata.bind = engine\n self.engine = engine\n DBSession = sessionmaker(bind=engine)\n self.session = DBSession()\n if id != None:\n self.id = id\n\n def close(self):\n self.session.close()\n self.engine.dispose()\n\n def get_clients(self):\n # List clients in the client table\n count = 0\n rs = []\n for instance in self.session.query(Client).order_by(Client.id):\n count = count + 1\n rs.append({'id':instance.id,'name':instance.name})\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n def get_client(self):\n # List clients in the client table\n count = 0\n rs = []\n for instance in self.session.query(Client).filter(Client.id== self.id):\n count = count + 1\n rs.append({'id':instance.id,'name':instance.name})\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n def get_cameras(self):\n # List clients in the client table\n count = 0\n rs = []\n for instance in self.session.query(Camera).order_by(Camera.id):\n count = count + 1\n rs.append({'id':instance.id,'name':instance.name})\n rs1 = 
{'count':count,'result_set':rs}\n return rs1\n\n def get_camera(self):\n # Get one camera by id from the camera table\n count = 0\n rs = []\n for instance in self.session.query(Camera).filter(Camera.id == self.id):\n count = count + 1\n rs.append({'id':instance.id,'name':instance.name})\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n def get_client_camera(self):\n # List the cameras linked to a client via the client_camera table\n count = 0\n rs = []\n\n for instance in self.session.query(Client,Camera,Client_Cameras).filter(Client.id == self.id) \\\n .filter(Client.id == Client_Cameras.client_id) \\\n .filter(Camera.id == Client_Cameras.camera_id):\n count = count + 1\n rs.append({'id':instance.Camera.id,'name':instance.Camera.name})\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n def get_stream(self):\n # List streams given camera id\n count = 0\n rs = []\n for instance in self.session.query(Stream).filter(Stream.camera_id == self.id): # filter on Stream.camera_id; filtering on Camera.id here would produce a cross join\n count = count + 1\n rs.append({'id':instance.id,'name':instance.stream_name,'arn':instance.arn,'region':instance.region})\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n def get_stream_object(self,query_column,value):\n # get stream object given its arn\n if query_column == 'arn':\n instance = self.session.query(Stream).filter(Stream.arn == value).first()\n\n return instance\n\n def get_stream_details(self,live='False'):\n # List streams given camera id\n count = 0\n rs = []\n if live == 'False':\n for instance in self.session.query(Camera,Stream,Stream_Details).filter(Camera.id == self.id) \\\n .filter(Camera.id == Stream.camera_id, Stream.id == Stream_Details.stream_id):\n count = count + 1\n rs.append({'name':instance.Stream.stream_name,\n 'arn': instance.Stream.arn,\n 'id':instance.Stream_Details.id,\n 'manifest_file_name':instance.Stream_Details.manifest_file_name,\n 'live':instance.Stream_Details.live,\n 'start_time':instance.Stream_Details.start_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'end_time':instance.Stream_Details.end_time.strftime('%Y-%m-%d %H:%M:%S') if instance.Stream_Details.end_time is not None else None\n })\n else:\n for instance in self.session.query(Camera,Stream,Stream_Details).filter(Camera.id == self.id) \\\n .filter(Camera.id == Stream.camera_id, Stream.id == Stream_Details.stream_id)\\\n .filter(Stream_Details.live == 'True'):\n # for bug where start times are sometimes null. Skip that record completely\n if instance.Stream_Details.start_time is not None:\n count = count + 1\n rs.append({'name':instance.Stream.stream_name,\n 'arn': instance.Stream.arn,\n 'id':instance.Stream_Details.id,\n 'manifest_file_name':instance.Stream_Details.manifest_file_name,\n 'live':instance.Stream_Details.live,\n 'start_time':instance.Stream_Details.start_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'end_time':instance.Stream_Details.end_time.strftime('%Y-%m-%d %H:%M:%S') if instance.Stream_Details.end_time is not None else None\n })\n\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n def get_stream_details_object(self,query_column,p_object):\n # if start time is found, we need to delete everything in attendant tables and rerun\n # table query does not seem to work with datetime\n\n if query_column == 'start_time':\n # ensure id and start_time are valid. 
Otherwise you can get spurious False == 1 or True == 1 matches\n id = p_object.id\n stime = p_object.start_time\n q = self.session.query(Stream_Details)\n q1 = q.filter(Stream_Details.stream_id == id, Stream_Details.start_time == stime)\n #print(q1)\n instance = q1.first()\n return instance\n\n def get_stream_details_object1(self,query_column,p_object):\n # if start time is found, we need to delete everything in attendant tables and rerun\n # table query does not seem to work with datetime\n if query_column == 'start_time':\n query_string = \" select id \" \\\n \"from Stream_Details where stream_id = \" + str(p_object.stream_id) +\\\n \" and start_time = '\" + str(p_object.start_time) + \"'\"\n instance = self.session.execute(query_string)\n if instance.rowcount == 1:\n for (id,) in instance:\n inst = self.session.query(Stream_Details).get(id)\n return inst\n return None\n\n def get_stream_details_by_time(self,stime,etime):\n # List streams given camera id and time parameters\n count = 0\n rs = []\n stime = datetime.datetime.strptime(stime, '%Y-%m-%d %H:%M:%S')\n etime = datetime.datetime.strptime(etime, '%Y-%m-%d %H:%M:%S')\n q = self.session.query(Camera,Stream,Stream_Details).filter(Camera.id == self.id)\\\n .filter(Camera.id == Stream.camera_id , Stream.id == Stream_Details.stream_id) \\\n .filter(Stream_Details.start_time >= stime, coalesce(Stream_Details.end_time,Stream_Details.start_time) <= etime)\n q1 = str(q.statement.compile(dialect=mysql.dialect()))\n\n # Use coalesce to set end time to start time for null end times for live events\n for instance in self.session.query(Camera,Stream,Stream_Details).filter(Camera.id == self.id)\\\n .filter(Camera.id == Stream.camera_id , Stream.id == Stream_Details.stream_id) \\\n .filter(Stream_Details.start_time >= stime, coalesce(Stream_Details.end_time,Stream_Details.start_time) <= etime) :\n count = count + 1\n rs.append({'name':instance.Stream.stream_name,\n 'arn': instance.Stream.arn,\n 'id':instance.Stream_Details.id,\n 'manifest_file_name':instance.Stream_Details.manifest_file_name,\n 'live':instance.Stream_Details.live,\n 'start_time':instance.Stream_Details.start_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'end_time':instance.Stream_Details.end_time.strftime('%Y-%m-%d %H:%M:%S') if instance.Stream_Details.end_time is not None else None\n })\n\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n def get_stream_details_raw(self,query_column,id):\n # if start time is found, we need to delete everything in attendant tables and rerun\n # table query does not seem to work with datetime\n if query_column == 'max_time':\n query_string = \" select max(server_time) \" \\\n \"from Stream_Details_Raw where stream_details_id = \" + str(id)\n elif query_column == 'min_time':\n query_string = \" select min(server_time) \" \\\n \"from Stream_Details_Raw where stream_details_id = \" + str(id)\n\n elif query_column == 'rawfilename':\n query_string = \" select producer_time,stream_details_id \" \\\n \"from Stream_Details_Raw where rawfilename = '\" + str(id) + \"'\"\n\n elif query_column == 'max_rawfilename':\n query_string = \" select max(rawfilename) \" \\\n \"from Stream_Details_Raw where stream_details_id = \" + str(id)\n\n\n\n instance = self.session.execute(query_string)\n if instance.rowcount == 1:\n for (mt) in instance:\n return mt\n\n\n\n return None\n\n\n def get_stream_metadata_by_time(self,stime,etime,label):\n # List stream metadata given camera id and time parameters\n # TODO need to send back non contiguous labels only\n count = 0\n rs = []\n stime = 
datetime.datetime.strptime(stime, '%Y-%m-%d %H:%M:%S')\n etime = datetime.datetime.strptime(etime, '%Y-%m-%d %H:%M:%S')\n label = label.split(',')\n # q = self.session.query(Stream,Stream_Details,Stream_MetaData ).filter(Stream.camera_id== self.id)\\\n # .filter(Stream.id == Stream_Details.stream_id , Stream_Details.id == Stream_MetaData.stream_details_id) \\\n # .filter(Stream_Details.start_time >= stime, Stream_Details.end_time <= etime) \\\n # .filter(Stream_MetaData.label.in_(label))\n # q1 = str(q.statement.compile(dialect=mysql.dialect()))\n\n for instance in self.session.query(Stream,Stream_Details,Stream_MetaData ).filter(Stream.camera_id== self.id)\\\n .filter(Stream.id == Stream_Details.stream_id , Stream_Details.id == Stream_MetaData.stream_details_id) \\\n .filter(Stream_Details.start_time >= stime, coalesce(Stream_Details.end_time,Stream_Details.start_time) <= etime) \\\n .filter(Stream_MetaData.label.in_(label)):\n count = count + 1\n rs.append({'name':instance.Stream.stream_name,\n 'arn': instance.Stream.arn,\n 'id':instance.Stream_Details.id,\n 'label':instance.Stream_MetaData.label,\n 'manifest_file_name':instance.Stream_Details.manifest_file_name,\n 'live':instance.Stream_Details.live,\n 'label_timestamp':instance.Stream_MetaData.timestamp,\n # convert decimal to str to make it json serializable\n 'seconds': str(instance.Stream_MetaData.seconds),\n 'start_time':instance.Stream_Details.start_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'end_time':instance.Stream_Details.end_time.strftime('%Y-%m-%d %H:%M:%S') if instance.Stream_Details.end_time != None else None\n })\n\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n\n def get_stream_metadata_by_time1(self,stime,etime,label):\n # List stream meta data given camera id and time parameters\n count = 0\n rs = []\n query_as_string = \"select Stream.stream_name,Stream.arn,Stream_Details.id,label,Stream_Details.manifest_file_name, \" \\\n \"Stream_Details.live, Stream_Details.start_time,Stream_Details.end_time \" \\\n \"from Stream,Stream_Details,Stream_MetaData \" \\\n \"where Stream.id = Stream_Details.stream_id and Stream_Details.id = Stream_MetaData.stream_details_id \" \\\n \"AND \" + \"Stream.camera_id= \" + str(self.id) + \" and \" + \"Stream_Details.start_time >= '\" + stime + \"' \"\\\n \"AND Stream_Details.end_time <= '\" + etime + \"'\" + \" AND Stream_MetaData.label in(\" + self.quote(label) + \")\"\n\n # Run as query as in statement does not work!\n result = self.session.execute(query_as_string)\n for (stream_name,arn,id,label,manifest_file_name,live,start_time,end_time) in result:\n count = count + 1\n rs.append({'name':stream_name,\n 'arn': arn,\n 'id':id,\n 'label':label,\n 'manifest_file_name':manifest_file_name,\n 'live':live,\n 'start_time':start_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'end_time':end_time.strftime('%Y-%m-%d %H:%M:%S')})\n\n rs1 = {'count':count,'result_set':rs}\n return rs1\n\n\n def get_analytics_metaData_object(self,key):\n instance = self.session.query(Analytics_MetaData).filter(Analytics_MetaData.camera_id == self.id,\n Analytics_MetaData.key == key).first()\n return instance\n\n\n def quote(self,label):\n x = \"\"\n count = 0\n label = label.split(',')\n for i in label:\n x = x + \"'\" + i + \"'\"\n count = count + 1\n if count < len(label):\n x = x + \",\"\n return x\n\n def put_stream_details(self,p_object):\n # TODO package this https://python-packaging.readthedocs.io/en/latest/\n # https://www.pythonsheets.com/notes/python-sqlalchemy.html\n row = 
Stream_Details(stream_id=p_object.stream_id,\n manifest_file_name=p_object.manifest_file_name,\n live = p_object.live,\n resolution = p_object.resolution,\n start_time = p_object.start_time,\n end_time = p_object.end_time)\n self.session.add(row)\n self.session.commit()\n\n return row\n\n def put_stream_details_raw(self,p_object):\n row = Stream_Details_Raw(stream_details_id=p_object.stream_details_id,\n rawfilename=p_object.rawfilename,\n server_time = p_object.server_time,\n producer_time = p_object.producer_time\n )\n self.session.add(row)\n self.session.flush()\n self.session.commit()\n return row\n\n def put_stream_details_ts(self,p_object):\n row = Stream_Details_TS(stream_details_id=p_object.stream_details_id,\n transportname=p_object.transportname,\n server_time = p_object.server_time)\n self.session.add(row)\n self.session.commit()\n return\n\n\n def put_stream_metadata(self,p_object):\n row = Stream_MetaData(stream_details_id=p_object.stream_details_id,\n frame_number=p_object.frame_number,\n label = p_object.label,\n confidence = p_object.confidence,\n position = p_object.position,\n timestamp = p_object.timestamp,\n group_id = p_object.group_id,\n seconds = p_object.seconds)\n self.session.add(row)\n self.session.commit()\n return\n\n def update_stream_details(self,p_object):\n stmt = update(Stream_Details).where(Stream_Details.id == p_object.id). \\\n values(live='False')\n self.session.execute(stmt) # the statement must be executed, otherwise the commit writes nothing\n self.session.commit()\n\n def update_analytics_metaData(self,p_object):\n newval = int(p_object.value) + 1\n p_object.value = str(newval)\n self.session.commit()\n\n\ndef testHarness():\n event = {}\n event['camera_id'] = 1\n event['client_id'] = 1\n event['label'] = 'person,knife'\n event['live'] = 'True'\n\n # db = database('1')\n # instance = db.get_analytics_metaData_object('raw_file_next_value')\n # db.update_analytics_metaData(instance)\n\n db = database(1)\n print(db.get_stream_details(event['live']))\n\n p_object = Object()\n id = 'test_2_rawfile00001000.mkv'\n instance = db.get_stream_details_raw('rawfilename', id)\n print(instance)\n #p_object.id = 1\n p_object.resolution = '1280x720x3'\n p_object.start_time = datetime.datetime.strptime('2018-06-1 9:04:02', '%Y-%m-%d %H:%M:%S')\n #db.put_stream_details(p_object)\n #instance = db.get_stream_details_object1('start_time',p_object)\n\n print('')\n\n if 'client_id' in event:\n db = database(event['client_id'])\n body = db.get_client_camera()\n print(json.dumps(body))\n db.close()\n\n if 'camera_id' in event:\n db = database(event['camera_id'])\n #body = db.get_stream_details()\n #print(json.dumps(body))\n s = '2018-07-11 12:00:00'\n e = '2018-07-12 12:18:30'\n label = event['label']\n body = db.get_stream_details_by_time(s,e)\n print(json.dumps(body))\n db.close()\n\nif __name__ == '__main__':\n testHarness()","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":18435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168094781","text":"from collections import deque\nimport random, lin_alg\n\ndef shortest_paths_from(from_user):\n\t\n\t#a dict from 'user_id' to all shortest paths to that user\n\tshortest_paths_to = { from_user[\"id\"] : [[]] }\n\t\n\t#a queue of (previous user, next user) that we need to check.\n\t#starts out with all pairs (from_user, friend_of_from_user)\n\tfrontier = deque((from_user, friend) for friend in from_user[\"friends\"])\n\t\n\t#keep going until we empty the queue\n\twhile frontier:\n\t\t\n\t\tprev_user, user = frontier.popleft() #remove 
first user in queue\n\t\tuser_id = user[\"id\"]\n\t\t\n\t\t#because of how we're adding to the queue,\n\t\t#we already know some of the shortest paths to prev_user\n\t\tpaths_to_prev_user = shortest_paths_to[prev_user[\"id\"]]\n\t\tnew_paths_to_user = [path + [user_id] for path in paths_to_prev_user]\n\t\t\n\t\t#it's possible we alrady know a shortest path\n\t\told_paths_to_user = shortest_paths_to.get(user_id, [])\n\t\t\n\t\t#what's the shortest path to here that we've seen so far?\n\t\tif old_paths_to_user:\n\t\t\tmin_path_length = len(old_paths_to_user[0])\n\t\telse:\n\t\t\tmin_path_length = float('inf')\n\t\t\t\n\t\t#only keep paths that aren't too long and are new\n\t\tnew_paths_to_user = [path for path in new_paths_to_user \n\t\t\t\t\t\t\t\t\t\tif len(path) <= min_path_length \n\t\t\t\t\t\t\t\t\t\tand path not in old_paths_to_user]\n\t\t\t\t\t\t\t\t\t\t\n\t\tshortest_paths_to[user_id] = old_paths_to_user + new_paths_to_user\n\t\t\n\t\t#add never-seen neighbors to the frontier\n\t\tfrontier.extend((user, friend) for friend in user[\"friends\"]\n\t\t\t\t\t\t\t\tif friend[\"id\"] not in shortest_paths_to)\n\t\t\t\t\t\t\t\t\n\treturn shortest_paths_to\n\t\ndef farness(user):\n\t\"\"\"the sum of the lengths of the shortest paths to each other user\"\"\"\n\treturn sum(len(paths[0]) for paths in user[\"shortest_paths\"].values())\n\t\ndef find_eigenvector(A, tolerance=0.00001):\n\tguess = [random.random() for _ in A]\n\t\n\twhile True:\n\t\tresult = lin_alg.matrix_operate(A, guess)\n\t\tlength = lin_alg.magnitude(result)\n\t\tnext_guess = lin_alg.scalar_multiply(1/length, result)\n\t\t\n\t\tif lin_alg.distance(guess, next_guess) < tolerance:\n\t\t\treturn next_guess, length #eigenvector, eigenvalue\n\t\t\t\n\t\tguess = next_guess\n\t\t\ndef entry_fn(i, j):\n\treturn 1 if (i, j) in friendships or (j, i) in friendships else 0\n\t\ndef page_rank(users, damping=0.85, num_iters=100):\n\t\n\t#initially distribute PageRank evenly\n\tnum_users = len(users)\n\tpr = { user[\"id\"]: 1 / num_users for user in users }\n\t\n\t#this is the small fraction of PageRank\n\t#that each node gets each iteration\n\tbase_pr = (1 - damping) / num_users\n\t\n\tfor _ in range(num_iters):\n\t\tnext_pr = { user[\"id\"] : base_pr for user in users }\n\t\tfor user in users:\n\t\t\t#distribute PageRank to outgoing links\n\t\t\tlinks_pr = pr[user[\"id\"]] * damping\n\t\t\tfor endorsee in user[\"endorses\"]:\n\t\t\t\tnext_pr[endorsee[\"id\"]] += links_pr / len(user[\"endorses\"])\n\t\t\t\t\n\t\tpr = next_pr\n\t\t\n\treturn pr\n\nif __name__ == \"__main__\":\n\n\tfriendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),\n (4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]\n\n\tusers = [\n { \"id\": 0, \"name\": \"Hero\" },\n { \"id\": 1, \"name\": \"Dunn\" },\n { \"id\": 2, \"name\": \"Sue\" },\n { \"id\": 3, \"name\": \"Chi\" },\n { \"id\": 4, \"name\": \"Thor\" },\n { \"id\": 5, \"name\": \"Clive\" },\n { \"id\": 6, \"name\": \"Hicks\" },\n { \"id\": 7, \"name\": \"Devin\" },\n { \"id\": 8, \"name\": \"Kate\" },\n { \"id\": 9, \"name\": \"Klein\" } ]\n\t\n\tfor user in users:\n\t\tuser[\"friends\"] = []\n\t\t\n\tfor i, j in friendships:\n\t\tusers[i][\"friends\"].append(users[j]) #add i as a friend of j\n\t\tusers[j][\"friends\"].append(users[i]) #add j as a friend of i\n\t\n\tfor user in users:\n\t\tuser[\"shortest_paths\"] = shortest_paths_from(user)\n\t\t\n\t#betweeness centrality\n\tfor user in users:\n\t\tuser[\"betweeness_centrality\"] = 0.0\n\t\t\n\tfor source in users:\n\t\tsource_id = source[\"id\"]\n\t\tfor 
target_id, paths in source[\"shortest_paths\"].items():\n\t\t\tif source_id < target_id: #don't double count\n\t\t\t\tnum_paths = len(paths) #how many shortest paths?\n\t\t\t\tcontrib = 1 / num_paths #contribution to centrality\n\t\t\t\tfor path in paths:\n\t\t\t\t\tfor id in path:\n\t\t\t\t\t\tif id not in [source_id, target_id]:\n\t\t\t\t\t\t\tusers[id][\"betweeness_centrality\"] += contrib\n\t\t\t\t\t\t\t\n\tfor user in users:\n\t\tuser[\"closeness_centrality\"] = 1 / farness(user)\n\t\n\t'''for user in users:\n\t\tprint(\"User \" + str(user[\"id\"]) + \": \" + str(user[\"shortest_paths\"]))\n\t\tprint()\n\t\tprint(\"User \" + str(user[\"id\"]) + \" betweeness centrality: \" + str(user[\"betweeness_centrality\"]))\n\t\tprint()\n\t\tprint(\"User \" + str(user[\"id\"]) + \" closeness centrality: \" + str(user[\"closeness_centrality\"]))\n\t\tprint()'''\n\t\t\n\t#eigenvector analysis\n\tn = len(users)\n\tadjacency_matrix = lin_alg.make_matrix(n, n, entry_fn)\n\teigenvector_centralities, _ = find_eigenvector(adjacency_matrix)\n\tfor i in range(0, len(eigenvector_centralities)):\n\t\tprint(\"User \" + str(i) + \": \" + str(eigenvector_centralities[i]))\n\t\t\n\t#PageRank and directed graphs (endorsements)\n\tendorsements = [(0, 1), (1, 0), (0, 2), (2, 0), (1, 2), (2, 1), (1, 3),\n (2, 3), (3, 4), (5, 4), (5, 6), (7, 5), (6, 8), (8, 7), (8, 9)]\n\n\tfor user in users:\n\t\tuser[\"endorses\"] = [] # add one list to track outgoing endorsements\n\t\tuser[\"endorsed_by\"] = [] # and another to track endorsements\n\t\t\n\tfor source_id, target_id in endorsements:\n\t\tusers[source_id][\"endorses\"].append(users[target_id])\n\t\tusers[target_id][\"endorsed_by\"].append(users[source_id])\n\n\n\tendorsements_by_id = [(user[\"id\"], len(user[\"endorsed_by\"]))\n\t\t\t\t\t\t for user in users]\n\n\tendorsements_by_id = sorted(endorsements_by_id, \n\t\t key=lambda pair: pair[1],\n\t\t reverse=True) # sorted() returns a new list, so the result must be kept\n\t\t \n\tfor user_id, pr in page_rank(users).items():\n\t\tprint(user_id, pr)","sub_path":"network_analysis.py","file_name":"network_analysis.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"70698414","text":"from datetime import timedelta, datetime\nfrom functools import wraps\nimport jwt\nfrom falcon import HTTPForbidden\n\nDEFAULT_CONFIG = {\n 'TOKEN_PREFIX': 'Bearer',\n\n 'TOKENS_EXPIRY': timedelta(hours=1),\n 'TOKENS_LEEWAY': timedelta(seconds=0),\n\n 'TOKENS_AUTHORIZE_ENDPOINT': '/auth',\n\n 'TOKENS_ENABLE_REFRESH': False,\n 'TOKENS_REFRESH_ENDPOINT': '/auth/refresh',\n 'SECRET_KEY': 'supersecretkey1234567890'\n}\n\n\n###############################################################################\n\nclass AuthMiddleware(object):\n def process_request(self, req, resp):\n \"\"\"\n WSGI Middleware event\n :param req: request\n :param resp: response\n\n \"\"\"\n\n _check = dict()\n\n if self._verify_token(req.auth):\n _check['valid'] = True\n else:\n _check['valid'] = False\n\n req.context.update(_check)\n\n def _verify_token(self, auth):\n \"\"\"\n Verify req.auth data with JWT specification\n :param auth: req.auth value\n :return: bool - is valid token\n \"\"\"\n # req.auth is None when no Authorization header was sent\n if not auth or not auth.startswith(DEFAULT_CONFIG['TOKEN_PREFIX']):\n return False\n\n _token = auth[len(DEFAULT_CONFIG['TOKEN_PREFIX']):]\n\n try:\n\n payload = self._decode(_token)\n if not payload:\n return False\n except:\n return False\n\n return True # decoding succeeded, so the token is valid\n\n def _make_token(self, user):\n if not user:\n return None\n\n # Return a ready-made token\n return 
self._encode(self._make_payload(user))\n\n def _make_payload(self, user, payload=None):\n # avoid a mutable default argument: a shared dict default would leak\n # claims between calls\n if payload is None:\n payload = {}\n for key, value in user.items():\n if value:\n payload[key] = value\n # Add an expiry date in there\n expiry = DEFAULT_CONFIG.get('TOKENS_EXPIRY')\n payload['exp'] = datetime.utcnow() + expiry\n return payload\n\n def _encode(self, payload):\n secret = DEFAULT_CONFIG.get('SECRET_KEY')\n return jwt.encode(payload, secret)\n\n def _decode(self, token, verify_expiration=True):\n try:\n # Try to decode the token - this blows up spectacularly if it fails\n leeway = DEFAULT_CONFIG.get('TOKENS_LEEWAY')\n return jwt.decode(token, DEFAULT_CONFIG.get('SECRET_KEY'), leeway=leeway.total_seconds())\n except jwt.DecodeError:\n # The token was tampered with, corrupted or otherwise invalid\n return None\n except jwt.ExpiredSignature:\n # The token has already expired, and the leeway couldn't save it :(\n return None\n\n\ndef token_required(func):\n @wraps(func)\n def f(*args, **kwargs):\n # falcon responders are called as (resource, req, resp, ...), so args[1]\n # is the request; reject it unless the middleware marked the token valid\n req = args[1]\n if not req.context.get('valid'):\n raise HTTPForbidden\n return func(*args, **kwargs)\n\n return f\n\n###############################################################################\n","sub_path":"app/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"586335134","text":"from strategy import CashStrategy \nfrom strategy import Credit_or_Debit_CardStrategy\nfrom strategy import Money_or_Bank_TransferStrategy\nfrom menu import payment_menu\n\n#A dictionary with all available payment methods.\n#Each str number is a Strategy\naccepted_payment_methods = {\n \"1\" : Credit_or_Debit_CardStrategy ,\n \"2\" : Money_or_Bank_TransferStrategy ,\n \"3\" : CashStrategy ,\n}\n\n\ndef paymentMethod():\n \"\"\"\n Function that helps the client pick the payment strategy they want.\n Inside the try/except we check that the given option is valid.\n If not, we respond with an appropriate message.\n \"\"\"\n payment_menu() #Calls the function from menu.py that shows the payment options\n while True:\n payment_list = [\"1\" , \"2\" , \"3\"]\n try:\n payment_method = int(input(\"Please choose: \"))\n payment_method = str(payment_method)\n if len(payment_method) != 1: #A valid choice is always a single digit\n raise IndexError\n if payment_method not in payment_list:\n raise ValueError\n break\n except ValueError:\n print(\"You must give the appropriate number as shown in the table !\")\n except IndexError:\n print(\"You must choose between the three payment methods. 
\"\n \"Your choice can't be more than one digit!\")\n\n #We use the dictionary with our options and instantiate it using () at the end.\n return (accepted_payment_methods[payment_method])()","sub_path":"payMethods.py","file_name":"payMethods.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"3601940","text":"#-*- coding:utf-8 -*-\nfrom flask import Flask, render_template, redirect, request\nfrom flaskext.mysql import MySQL\n\n\nmysql = MySQL()\napp = Flask(__name__)\napp.config['MYSQL_DATABASE_USER'] = 'serverstudy'\napp.config['MYSQL_DATABASE_PASSWORD'] = 'serverstudy!@#'\napp.config['MYSQL_DATABASE_DB'] = 'serverstudy'\napp.config['MYSQL_DATABASE_HOST'] = 'data.khuhacker.com'\napp.config['MYSQL_CHARSET'] = 'utf-8'\nmysql.init_app(app)\n\n@app.route('/guestbook')\ndef guestbook():\n cursor = mysql.connect().cursor()\n cursor.execute('SELECT * FROM KDG_users')\n datas = cursor.fetchall()\n cursor.close()\n return render_template(\"guestbook.html\", datas=datas)\n\n@app.route('/submit', methods=['POST'])\ndef submit():\n if request.method == \"POST\":\n Num = request.form[\"Num\"]\n author = request.form[\"Name\"]\n Comment = request.form[\"Comment\"]\n con = mysql.connect()\n cur = con.cursor()\n # use parameterized queries so form input cannot inject SQL\n if (Num != '') and (author == '') and (Comment == ''):\n cur.execute('DELETE FROM KDG_users WHERE num=%s', (Num,))\n elif (author != '') and (Comment != '') and (Num == ''):\n cur.execute('INSERT INTO KDG_users (author, comment) VALUES (%s, %s)',(author, Comment))\n else:\n cur.execute('UPDATE KDG_users SET author=%s, comment=%s WHERE num=%s', (author, Comment, Num))\n con.commit()\n cur.close()\n return redirect('/guestbook')\n\n\n'''\n@app.route('/userlist/')\ndef showUsers():\n cur = mysql.connect().cursor()\n cur.execute('SELECT * FROM KDG_users')\n data = cur.fetchall()\n cur.close()\n output = \"\"\n\n for user in data:\n output += \"Num : %s, Name : %s, Content : %s
\"%(user[0], user[1], user[2])\n return output\n\n@app.route('/adduser//')\ndef addUser(Name, Content):\n con = mysql.connect()\n cur = con.cursor()\n cur.execute('INSERT INTO KDG_users (name, content) VALUES (%s, %s)',(Name, Content))\n con.commit()\n cur.close()\n return redirect('/userlist')\n\n@app.route('/index')\n@app.route('/index/')\ndef hello_world(name=''):\n return render_template('hello.html', username=name)\n\n@app.route('/sum//')\ndef sum(num1,num2):\n return str(num1+num2)'''\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"python/day10/Flask.py","file_name":"Flask.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"269541468","text":"\"\"\"This module provides file I/O for Quake LMP lump files.\n\nExample:\n lmp_file = lmp.Lmp.open('palette.lmp')\n\nReferences:\n Quake Source\n - id Software\n - https://github.com/id-Software/Quake\n\n Quake Documentation Version 3.4\n - Olivier Montanuy, et al.\n - http://www.gamers.org/dEngine/quake/spec/quake-spec34/qkspec_7.htm#CWADS\n\"\"\"\n\nimport io\nimport struct\n\n__all__ = ['BadLmpFile', 'Lmp']\n\n\nclass BadLmpFile(Exception):\n pass\n\n\n# The header structure for 2D lumps\nheader_format = '<2l'\nheader_size = struct.calcsize(header_format)\n\n# The data structure for palette lumps\npalette_format = '<768B'\npalette_size = struct.calcsize(palette_format)\n\n# The data structure for colormap lumps\ncolormap_format = '<16384B'\ncolormap_size = struct.calcsize(colormap_format)\n\n# For some reason the colormap shipped with Quake has one extra byte\nquake_colormap_size = struct.calcsize('<16385B')\n\ndefault_palette = (\n (0x00,0x00,0x00),(0x0f,0x0f,0x0f),(0x1f,0x1f,0x1f),(0x2f,0x2f,0x2f),\n (0x3f,0x3f,0x3f),(0x4b,0x4b,0x4b),(0x5b,0x5b,0x5b),(0x6b,0x6b,0x6b),\n (0x7b,0x7b,0x7b),(0x8b,0x8b,0x8b),(0x9b,0x9b,0x9b),(0xab,0xab,0xab),\n (0xbb,0xbb,0xbb),(0xcb,0xcb,0xcb),(0xdb,0xdb,0xdb),(0xeb,0xeb,0xeb),\n (0x0f,0x0b,0x07),(0x17,0x0f,0x0b),(0x1f,0x17,0x0b),(0x27,0x1b,0x0f),\n (0x2f,0x23,0x13),(0x37,0x2b,0x17),(0x3f,0x2f,0x17),(0x4b,0x37,0x1b),\n (0x53,0x3b,0x1b),(0x5b,0x43,0x1f),(0x63,0x4b,0x1f),(0x6b,0x53,0x1f),\n (0x73,0x57,0x1f),(0x7b,0x5f,0x23),(0x83,0x67,0x23),(0x8f,0x6f,0x23),\n (0x0b,0x0b,0x0f),(0x13,0x13,0x1b),(0x1b,0x1b,0x27),(0x27,0x27,0x33),\n (0x2f,0x2f,0x3f),(0x37,0x37,0x4b),(0x3f,0x3f,0x57),(0x47,0x47,0x67),\n (0x4f,0x4f,0x73),(0x5b,0x5b,0x7f),(0x63,0x63,0x8b),(0x6b,0x6b,0x97),\n (0x73,0x73,0xa3),(0x7b,0x7b,0xaf),(0x83,0x83,0xbb),(0x8b,0x8b,0xcb),\n (0x00,0x00,0x00),(0x07,0x07,0x00),(0x0b,0x0b,0x00),(0x13,0x13,0x00),\n (0x1b,0x1b,0x00),(0x23,0x23,0x00),(0x2b,0x2b,0x07),(0x2f,0x2f,0x07),\n (0x37,0x37,0x07),(0x3f,0x3f,0x07),(0x47,0x47,0x07),(0x4b,0x4b,0x0b),\n (0x53,0x53,0x0b),(0x5b,0x5b,0x0b),(0x63,0x63,0x0b),(0x6b,0x6b,0x0f),\n (0x07,0x00,0x00),(0x0f,0x00,0x00),(0x17,0x00,0x00),(0x1f,0x00,0x00),\n (0x27,0x00,0x00),(0x2f,0x00,0x00),(0x37,0x00,0x00),(0x3f,0x00,0x00),\n (0x47,0x00,0x00),(0x4f,0x00,0x00),(0x57,0x00,0x00),(0x5f,0x00,0x00),\n (0x67,0x00,0x00),(0x6f,0x00,0x00),(0x77,0x00,0x00),(0x7f,0x00,0x00),\n (0x13,0x13,0x00),(0x1b,0x1b,0x00),(0x23,0x23,0x00),(0x2f,0x2b,0x00),\n (0x37,0x2f,0x00),(0x43,0x37,0x00),(0x4b,0x3b,0x07),(0x57,0x43,0x07),\n (0x5f,0x47,0x07),(0x6b,0x4b,0x0b),(0x77,0x53,0x0f),(0x83,0x57,0x13),\n (0x8b,0x5b,0x13),(0x97,0x5f,0x1b),(0xa3,0x63,0x1f),(0xaf,0x67,0x23),\n (0x23,0x13,0x07),(0x2f,0x17,0x0b),(0x3b,0x1f,0x0f),(0x4b,0x23,0x13),\n 
(0x57,0x2b,0x17),(0x63,0x2f,0x1f),(0x73,0x37,0x23),(0x7f,0x3b,0x2b),\n (0x8f,0x43,0x33),(0x9f,0x4f,0x33),(0xaf,0x63,0x2f),(0xbf,0x77,0x2f),\n (0xcf,0x8f,0x2b),(0xdf,0xab,0x27),(0xef,0xcb,0x1f),(0xff,0xf3,0x1b),\n (0x0b,0x07,0x00),(0x1b,0x13,0x00),(0x2b,0x23,0x0f),(0x37,0x2b,0x13),\n (0x47,0x33,0x1b),(0x53,0x37,0x23),(0x63,0x3f,0x2b),(0x6f,0x47,0x33),\n (0x7f,0x53,0x3f),(0x8b,0x5f,0x47),(0x9b,0x6b,0x53),(0xa7,0x7b,0x5f),\n (0xb7,0x87,0x6b),(0xc3,0x93,0x7b),(0xd3,0xa3,0x8b),(0xe3,0xb3,0x97),\n (0xab,0x8b,0xa3),(0x9f,0x7f,0x97),(0x93,0x73,0x87),(0x8b,0x67,0x7b),\n (0x7f,0x5b,0x6f),(0x77,0x53,0x63),(0x6b,0x4b,0x57),(0x5f,0x3f,0x4b),\n (0x57,0x37,0x43),(0x4b,0x2f,0x37),(0x43,0x27,0x2f),(0x37,0x1f,0x23),\n (0x2b,0x17,0x1b),(0x23,0x13,0x13),(0x17,0x0b,0x0b),(0x0f,0x07,0x07),\n (0xbb,0x73,0x9f),(0xaf,0x6b,0x8f),(0xa3,0x5f,0x83),(0x97,0x57,0x77),\n (0x8b,0x4f,0x6b),(0x7f,0x4b,0x5f),(0x73,0x43,0x53),(0x6b,0x3b,0x4b),\n (0x5f,0x33,0x3f),(0x53,0x2b,0x37),(0x47,0x23,0x2b),(0x3b,0x1f,0x23),\n (0x2f,0x17,0x1b),(0x23,0x13,0x13),(0x17,0x0b,0x0b),(0x0f,0x07,0x07),\n (0xdb,0xc3,0xbb),(0xcb,0xb3,0xa7),(0xbf,0xa3,0x9b),(0xaf,0x97,0x8b),\n (0xa3,0x87,0x7b),(0x97,0x7b,0x6f),(0x87,0x6f,0x5f),(0x7b,0x63,0x53),\n (0x6b,0x57,0x47),(0x5f,0x4b,0x3b),(0x53,0x3f,0x33),(0x43,0x33,0x27),\n (0x37,0x2b,0x1f),(0x27,0x1f,0x17),(0x1b,0x13,0x0f),(0x0f,0x0b,0x07),\n (0x6f,0x83,0x7b),(0x67,0x7b,0x6f),(0x5f,0x73,0x67),(0x57,0x6b,0x5f),\n (0x4f,0x63,0x57),(0x47,0x5b,0x4f),(0x3f,0x53,0x47),(0x37,0x4b,0x3f),\n (0x2f,0x43,0x37),(0x2b,0x3b,0x2f),(0x23,0x33,0x27),(0x1f,0x2b,0x1f),\n (0x17,0x23,0x17),(0x0f,0x1b,0x13),(0x0b,0x13,0x0b),(0x07,0x0b,0x07),\n (0xff,0xf3,0x1b),(0xef,0xdf,0x17),(0xdb,0xcb,0x13),(0xcb,0xb7,0x0f),\n (0xbb,0xa7,0x0f),(0xab,0x97,0x0b),(0x9b,0x83,0x07),(0x8b,0x73,0x07),\n (0x7b,0x63,0x07),(0x6b,0x53,0x00),(0x5b,0x47,0x00),(0x4b,0x37,0x00),\n (0x3b,0x2b,0x00),(0x2b,0x1f,0x00),(0x1b,0x0f,0x00),(0x0b,0x07,0x00),\n (0x00,0x00,0xff),(0x0b,0x0b,0xef),(0x13,0x13,0xdf),(0x1b,0x1b,0xcf),\n (0x23,0x23,0xbf),(0x2b,0x2b,0xaf),(0x2f,0x2f,0x9f),(0x2f,0x2f,0x8f),\n (0x2f,0x2f,0x7f),(0x2f,0x2f,0x6f),(0x2f,0x2f,0x5f),(0x2b,0x2b,0x4f),\n (0x23,0x23,0x3f),(0x1b,0x1b,0x2f),(0x13,0x13,0x1f),(0x0b,0x0b,0x0f),\n (0x2b,0x00,0x00),(0x3b,0x00,0x00),(0x4b,0x07,0x00),(0x5f,0x07,0x00),\n (0x6f,0x0f,0x00),(0x7f,0x17,0x07),(0x93,0x1f,0x07),(0xa3,0x27,0x0b),\n (0xb7,0x33,0x0f),(0xc3,0x4b,0x1b),(0xcf,0x63,0x2b),(0xdb,0x7f,0x3b),\n (0xe3,0x97,0x4f),(0xe7,0xab,0x5f),(0xef,0xbf,0x77),(0xf7,0xd3,0x8b),\n (0xa7,0x7b,0x3b),(0xb7,0x9b,0x37),(0xc7,0xc3,0x37),(0xe7,0xe3,0x57),\n (0x7f,0xbf,0xff),(0xab,0xe7,0xff),(0xd7,0xff,0xff),(0x67,0x00,0x00),\n (0x8b,0x00,0x00),(0xb3,0x00,0x00),(0xd7,0x00,0x00),(0xff,0x00,0x00),\n (0xff,0xf3,0x93),(0xff,0xf7,0xc7),(0xff,0xff,0xff),(0x9f,0x5b,0x53),\n)\n\n\nclass Image(object):\n \"\"\"Class for representing pixel data\n\n Attributes:\n width: The width of the image.\n\n height: The height of the image.\n\n format: A string describing the format of the color data. Usually 'RGB'\n or 'RGBA'\n\n pixels: The raw pixel data of the image.\n The length of this attribute is:\n\n width * height * len(format)\n \"\"\"\n\n __slots__ = (\n 'width',\n 'height',\n 'format',\n 'pixels'\n )\n\n def __init__(self):\n self.width = 0\n self.height = 0\n self.format = 'RGBA'\n self.pixels = None\n\n\nclass Lmp(object):\n \"\"\"Class for working with Lmp files\n\n There are three different types of lump files:\n 1. 2D image - The majority of the lump files are 2D images. 
If a lump \n is a 2D image it will have width, height, and pixels attributes.\n\n 2. Palette - The palette lump has the palette attribute which is a \n list of 256 RGB tuples. This is used to map color indexes to \n actual RGB triples.\n\n 3. Colormap - The colormap lump has the colormap attribute which is a\n list of 16384 color indexes. It functions as a 256 x 64 table for \n mapping colors to different values for lighting.\n \n Example:\n l = Lmp.open(file)\n\n Attributes:\n width: (2D image lump only) The width of the lump.\n\n height: (2D image lump only) The height of the lump.\n\n pixels: (2D image lump only) The raw pixel data.\n\n palette: (Palette lump only) A list of 256 RGB tuples.\n\n colormap: (Color Map lump only) A list of 16384 color indexes.\n \"\"\"\n\n def __init__(self):\n self.fp = None\n self.mode = None\n self._did_modify = False\n\n @staticmethod\n def open(file, mode='r'):\n \"\"\"Returns an Lmp object\n \n Args:\n file: Either the path to the file, a file-like object, or bytes.\n\n mode: An optional string that indicates which mode to open the file\n\n Returns:\n An Lmp object constructed from the information read from the\n file-like object.\n\n Raises:\n ValueError: If an invalid file mode is given.\n\n RuntimeError: If the file argument is not a file-like object.\n \"\"\"\n\n if mode not in ('r', 'w', 'a'):\n raise ValueError(\"invalid mode: '%s'\" % mode)\n\n filemode = {'r': 'rb', 'w': 'w+b', 'a': 'r+b'}[mode]\n\n if isinstance(file, str):\n file = io.open(file, filemode)\n\n elif isinstance(file, bytes):\n file = io.BytesIO(file)\n\n elif not hasattr(file, 'read'):\n raise RuntimeError(\"Lmp.open() requires 'file' to be a path, a file-like object, or bytes\")\n\n # Read\n if mode == 'r':\n return Lmp._read_file(file, mode)\n\n # Write\n elif mode == 'w':\n lmp = Lmp()\n lmp.fp = file\n lmp.mode = 'w'\n lmp._did_modify = True\n\n return lmp\n\n # Append\n else:\n lmp = Lmp._read_file(file, mode)\n lmp._did_modify = True\n\n return lmp\n\n @staticmethod\n def _read_file(file, mode):\n data = file.read(-1)\n data_size = len(data)\n\n width, height = struct.unpack(header_format, data[0:8])\n\n # Determine which kind of lump we are working with\n if width * height + header_size == data_size:\n lump = Lmp._read_lmp(data)\n\n elif data_size == palette_size:\n lump = Lmp._read_palette(data)\n\n elif data_size == colormap_size or data_size == quake_colormap_size:\n lump = Lmp._read_colormap(data)\n\n else:\n raise BadLmpFile(\"unable to determine format of lump file\")\n\n lump.fp = file\n lump.mode = mode\n\n return lump\n\n @staticmethod\n def _read_lmp(data):\n \"\"\"Returns a 2D image lump.\n \n Args:\n data: A byte array\n \"\"\"\n\n width, height = struct.unpack(header_format, data[:header_size])\n\n pixels_format = '<%iB' % (width * height)\n pixels_size = struct.calcsize(pixels_format)\n\n pixels = data[header_size:pixels_size + header_size]\n pixels = struct.unpack(pixels_format, pixels)\n\n lmp = Lmp()\n lmp.width = width\n lmp.height = height\n lmp.pixels = pixels\n\n return lmp\n\n @staticmethod\n def _read_palette(data):\n \"\"\"Returns a palette lump\n \n Args:\n data: A byte array.\n \"\"\"\n\n data = struct.unpack(palette_format, data)\n\n pixels = []\n i = 0\n\n while i < 768:\n pixels.append((data[i], data[i + 1], data[i + 2]))\n i += 3\n\n lmp = Lmp()\n lmp.palette = pixels\n\n return lmp\n\n @staticmethod\n def _read_colormap(data):\n \"\"\"Returns a colormap lump\n \n Args:\n data: A byte array.\n \"\"\"\n\n data = struct.unpack(colormap_format, 
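# _read_palette above walks the 768 unpacked bytes three at a time with a manual
# index counter. An equivalent, more idiomatic grouping sketch (same output,
# 256 (r, g, b) triples, given the flat tuple struct.unpack returns):

def group_rgb(flat):
    it = iter(flat)
    return list(zip(it, it, it))  # zip pulls from the same iterator 3 at a time

assert group_rgb(range(6)) == [(0, 1, 2), (3, 4, 5)]
assert len(group_rgb(range(768))) == 256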
data[:colormap_size])\n\n        lmp = Lmp()\n        lmp.colormap = data\n\n        return lmp\n\n    @staticmethod\n    def _write_lmp(file, lmp):\n        header_data = struct.pack(header_format,\n                                  lmp.width,\n                                  lmp.height)\n\n        pixels_format = '<%iB' % (lmp.width * lmp.height)\n        pixels_data = struct.pack(pixels_format,\n                                  *lmp.pixels)\n\n        file.write(header_data)\n        file.write(pixels_data)\n\n    @staticmethod\n    def _write_palette(file, lmp):\n        # Flatten out palette\n        palette = []\n\n        for i in lmp.palette:\n            palette.append(i[0])\n            palette.append(i[1])\n            palette.append(i[2])\n\n        if len(palette) != 768:\n            raise BadLmpFile\n\n        palette_data = struct.pack(palette_format,\n                                   *palette)\n\n        file.write(palette_data)\n\n    @staticmethod\n    def _write_colormap(file, lmp):\n        if len(lmp.colormap) != colormap_size:\n            raise BadLmpFile\n\n        colormap_data = struct.pack(colormap_format,\n                                    *lmp.colormap)\n\n        file.write(colormap_data)\n\n    @staticmethod\n    def _write_file(file, lmp):\n        if hasattr(lmp, 'width') and hasattr(lmp, 'height'):\n            Lmp._write_lmp(file, lmp)\n\n        elif hasattr(lmp, 'palette'):\n            Lmp._write_palette(file, lmp)\n\n        elif hasattr(lmp, 'colormap'):\n            Lmp._write_colormap(file, lmp)\n\n        else:\n            raise BadLmpFile('Unable to determine type of Lmp file to write')\n\n    @staticmethod\n    def write(file, lmp):\n        Lmp._write_file(file, lmp)\n\n    def save(self, file):\n        \"\"\"Writes Lmp data to file\n\n        Args:\n            file: Either the path to the file, or a file-like object, or bytes.\n\n        Raises:\n            RuntimeError: If file argument is not a file-like object.\n\n            BadLmpFile: If unable to determine type of Lmp to write.\n        \"\"\"\n\n        should_close = False\n\n        if isinstance(file, str):\n            file = io.open(file, 'r+b')\n            should_close = True\n\n        elif isinstance(file, bytes):\n            file = io.BytesIO(file)\n            should_close = True\n\n        elif not hasattr(file, 'write'):\n            raise RuntimeError(\n                \"Lmp.save() requires 'file' to be a path, a file-like object, \"\n                \"or bytes\")\n\n        Lmp._write_file(file, self)\n\n        if should_close:\n            file.close()\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, type, value, traceback):\n        self.close()\n\n    def close(self):\n        \"\"\"Closes the file pointer if possible. 
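# The __enter__/__exit__ pair above makes Lmp a context manager: leaving the
# with-block calls close(), which for 'w'/'a' handles seeks to 0, rewrites the
# lump, and truncates. A hypothetical usage sketch ('palette.lmp' is a stand-in
# path; Lmp as defined above):

def brighten_palette(path='palette.lmp'):
    with Lmp.open(path, mode='a') as lmp:
        lmp.palette = [(min(r + 16, 255), min(g + 16, 255), min(b + 16, 255))
                       for r, g, b in lmp.palette]
    # the edited palette is flushed back to disk when the block exits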
If mode is 'w' or 'a', the file\n will be written to.\n \"\"\"\n\n if self.fp:\n if self.mode in ('w', 'a') and self._did_modify:\n self.fp.seek(0)\n Lmp._write_file(self.fp, self)\n self.fp.truncate()\n\n file_object = self.fp\n self.fp = None\n file_object.close()\n\n def image(self, palette=default_palette):\n \"\"\"Returns an Image object.\n\n Args:\n palette: A 256 color palette to use for converted index color data\n to RGBA data.\n\n Returns:\n An Image object.\n \"\"\"\n\n image = Image()\n\n if hasattr(self, 'palette'):\n image.width = 16\n image.height = 16\n \n p = []\n for i, entry in enumerate(self.palette):\n p += (entry)\n p += [255] if i is not 255 else [0]\n\n image.pixels = p\n\n elif hasattr(self, 'colormap'):\n image.width = 256\n image.height = 64\n \n p = []\n for index in self.colormap:\n p += palette[index]\n p += [255] if index is not 255 else [0]\n\n image.pixels = p\n\n else:\n image.width = self.width\n image.height = self.height\n \n p = []\n for index in self.pixels:\n p += palette[index]\n p += [255] if index is not 255 else [0]\n\n image.pixels = p\n\n d = []\n for row in reversed(range(image.height)):\n d += image.pixels[row * image.width * 4:(row + 1) * image.width * 4]\n\n image.pixels = d\n\n return image\n","sub_path":"quake/quake/lmp.py","file_name":"lmp.py","file_ext":"py","file_size_in_byte":15081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"505694735","text":"import json\n\n#Source data path configuration\ndistricts_data_path = '/home/pixnet/code/poi_data/taiwan_districts_mapping.json'\npoi_addr_data_path = '/home/pixnet/code/poi_data/location_POIs.json'\npoiseq_data_path = '/home/pixnet/code/poi_data/POIseq2articles.json'\npopularity_data_path = '/home/pixnet/code/popularity/url_popularity.json'\npoi_data_path = '/home/pixnet/code/poi_data/scenic_spot_C_f_with_pic.json'\nurl_title_data_path = '/home/pixnet/code/WebAPI/url_title.json'\n\ndef read_data(data_path):\n data_file = open(data_path,encoding='utf-8-sig')\n data = json.loads(data_file.read())\n return data\n\nclass API:\n def __init__(self):\n global districts_data_path,poi_addr_data_path,poiseq_data_path,popularity_data_path,poi_data_path,url_title_data_path\n self.poi_data = read_data(poi_data_path)\n self.distircts_data = read_data(districts_data_path)\n self.poi_addr_data = read_data(poi_addr_data_path)\n self.poiseq_data = read_data(poiseq_data_path)\n self.popularity_data = read_data(popularity_data_path)\n self.url_title_data = read_data(url_title_data_path)\n self.poi_info_dict = {}\n self.generate_poi_addr_dict()\n\n def generate_poi_addr_dict(self):\n poi_data = self.poi_data\n poi_data_list = poi_data['XML_Head']['Infos']['Info']\n for poi in poi_data_list:\n self.poi_info_dict[poi['Name']] = {}\n self.poi_info_dict[poi['Name']]['Add'] = poi['Add']\n if(poi['Picture1']!=''):\n self.poi_info_dict[poi['Name']]['pic'] = poi['Picture1']\n self.poi_info_dict[poi['Name']]['pic_des'] = poi['Picdescribe1']\n else:\n self.poi_info_dict[poi['Name']]['pic'] = 'No picture'\n self.poi_info_dict[poi['Name']]['pic_des'] = 'No picture'\n \n def parse_location(self,location):\n districts = self.distircts_data\n match_addr_list = []\n location = location.replace('台','臺')\n for key in districts.keys():\n if(key.find(location)!=-1):\n for sub_key in districts[key].keys():\n match_addr_list.append((key,sub_key))\n else:\n for sub_key in districts[key].keys():\n if(sub_key.find(location)!=-1):\n match_addr_list.append((key,sub_key))\n return 
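# image() above expands 8-bit indexes to RGBA and marks palette slot 255 as
# transparent, but compares with `index is not 255` — an identity test that only
# happens to work because CPython caches small ints. A standalone sketch of the
# same expansion using an ordinary equality test:

def indexes_to_rgba(indexes, palette):
    out = []
    for index in indexes:
        out.extend(palette[index])              # r, g, b from the palette
        out.append(0 if index == 255 else 255)  # alpha: slot 255 is transparent
    return out

assert indexes_to_rgba([0, 255], [(1, 2, 3)] * 256) == [1, 2, 3, 255, 1, 2, 3, 0]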
match_addr_list\n\n def all_location(self):\n districts = self.distircts_data\n addr_list = []\n for key in districts.keys():\n for sub_key in districts[key].keys():\n addr_list.append((key,sub_key))\n return addr_list\n \n def districts_to_POIs(self,addr_list):\n pois = self.poi_addr_data\n match_poi_list = []\n for key1,key2 in addr_list:\n match_poi_list += [poi['Name'] for poi in pois[key1][key2]]\n return match_poi_list\n\n def get_poiseq(self,pois_list):\n seqs = self.poiseq_data\n popularity = self.popularity_data\n seq_list = []\n for seq_str in seqs.keys():\n tmp_dict = {}\n add_list = []\n title_list = []\n pic_list = []\n pic_des_list = []\n seq = eval(seq_str)\n for poi in seq:\n add_list.append(self.poi_info_dict[poi]['Add'])\n pic_list.append(self.poi_info_dict[poi]['pic'])\n pic_des_list.append(self.poi_info_dict[poi]['pic_des'])\n for url in seqs[seq_str]:\n title_list.append(self.url_title_data[url])\n tmp_dict['poi_seq'] = seq\n tmp_dict['addr_seq'] = add_list\n tmp_dict['match_score'] = float(len([poi for poi in seq if(poi in pois_list)]))/float(len(seq))\n tmp_dict['popular_score'] = sum([float(popularity[url]) for url in seqs[seq_str] if popularity[url]])\n tmp_dict['url_list'] = seqs[seq_str]\n tmp_dict['title_list'] = title_list\n tmp_dict['pic_list'] = pic_list\n tmp_dict['pic_des_list'] = pic_des_list\n seq_list.append(tmp_dict)\n return seq_list\n\n def get_POI_addr_info(self):\n return self.poi_addr_dict\n \n def compose_poi_seqs(self,seq_list,days):\n comp_list = []\n if(days==0):\n for seq in seq_list:\n comp_list.append([seq])\n return comp_list\n\n def call_search_API(self,location=None,time=None,activity=None):\n if(location != None):\n addr_list = self.parse_location(location)\n pois_list = self.districts_to_POIs(addr_list)\n seq_list = self.get_poiseq(pois_list)\n seq_list = [seq for seq in seq_list if seq['match_score']>=1]\n seq_list.sort(key= lambda seq: (seq['match_score'],seq['popular_score']),reverse=True)\n #return seq_list\n else:\n addr_list = self.all_location()\n pois_list = self.districts_to_POIs(addr_list)\n seq_list = self.get_poiseq(pois_list)\n seq_list.sort(key= lambda seq: (seq['match_score'],seq['popular_score']),reverse=True)\n #TODO: process query with time and activity\n if(time == None):\n result = self.compose_poi_seqs(seq_list,0)\n else:\n result = self.compose_poi_seqs(seq_list,time)\n #if(activity=None):\n result = [poi_seq for poi_seq in result if(len(poi_seq[0]['poi_seq'])>1 and len(poi_seq[0]['poi_seq'])<5)]\n return result\n\n\"\"\"\nUsage\n==============================================================\nInit a API() object when starting server\nSend a request with function call_API(location,time,activity)\nThis will return a list of dict contain POI seqeunce infos\n==============================================================\n\npixnetAPI = API()\n#print(pixnetAPI.get_POI_addr_info())\noutput = pixnetAPI.call_search_API('台南')\noutput = [poi_seq for poi_seq in output if(len(poi_seq[0]['poi_seq'])>1 and len(poi_seq[0]['poi_seq'])<5)]\nwith open('台南_output.txt','w') as f:\n output_str = str(output)\n f.write(output_str)\nprint(len(output))\n'''\nfor i in range(10):\n if(len(output[i][0]['title_list'])<1):\n print(output[i])\n'''\n#print([out for out in output if len(out['url_list'])>1])\n#for i in range(20):\n# print(output[i])\n\"\"\"\n 
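# call_search_API above ranks sequences with key=(match_score, popular_score)
# and reverse=True: tuples compare element-wise, so popularity only breaks ties
# between equal match scores. A tiny self-contained illustration:

seqs = [{'match_score': 1.0, 'popular_score': 2.0},
        {'match_score': 0.5, 'popular_score': 9.0},
        {'match_score': 1.0, 'popular_score': 7.0}]
seqs.sort(key=lambda s: (s['match_score'], s['popular_score']), reverse=True)
assert [s['popular_score'] for s in seqs] == [7.0, 2.0, 9.0]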
\n","sub_path":"test_server/pixnetproject/tripapp/API/test_API.py","file_name":"test_API.py","file_ext":"py","file_size_in_byte":6313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71502926","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.contrib import messages\nfrom django.utils import timezone\nfrom django.http import HttpResponseRedirect,Http404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render, get_object_or_404, redirect\n\nfrom .forms import PostForm\nfrom .models import Post\nfrom comments.models import Comment\nfrom comments.forms import CommentForm\n\n# Create your views here.\n\ndef post_create(request):\n form=PostForm(request.POST or None, request.FILES or None)\n # if not request.user.is_staff or not request.user.is_superuser:\n # raise Http404\n if not request.user.is_authenticated:\n raise Http404\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user=request.user\n instance.save()\n messages.success(request,\"Successfully Created\")\n return HttpResponseRedirect(instance.get_absolute_url())\n else:\n messages.error(request,\"Not Successfully Created\")\n context= {\n \"form\":form,\n }\n return render(request,\"post_form.html\",context)\n\ndef post_detail(request,slug=None):\n instance = get_object_or_404(Post,slug=slug)\n if not request.user.is_staff or not request.user.is_superuser:\n if instance.draft or instance.publish>timezone.now().date():\n raise Http404\n\n inicial_date_new_comment={\n \"obj_id\" : instance.id,\n \"timestamp\" : timezone.now()\n }\n\n form = CommentForm(request.POST or None, initial=inicial_date_new_comment)\n if form.is_valid():\n content_type = ContentType.objects.get_for_model(instance)\n obj_id = form.cleaned_data.get(\"obj_id\")\n content_data = form.cleaned_data.get(\"content\")\n parent_obj=None\n try:\n parent_id=int(request.POST.get(\"parent_id\"))\n except:\n parent_id=None\n\n if parent_id:\n parent_qs=Comment.objects.filter(id=parent_id)\n if parent_qs.exists() | parent_qs.count()==1:\n parent_obj=parent_qs.first()\n\n new_comment, created = Comment.objects.get_or_create(\n object_id=obj_id,\n content_type=content_type,\n content=content_data,\n parent=parent_obj\n )\n\n return HttpResponseRedirect(new_comment.content_object.get_absolute_url())\n\n comments=Comment.objects.filter_by_intance(instance)\n\n context= {\n \"title\":instance.title,\n \"obj\":instance,\n \"comments\":comments,\n \"comment_form\": form\n }\n return render(request,\"post_detail.html\",context)\n\ndef post_list(request):\n\n # if request.user.is_authenticated():\n # context={\n # \"title\": \"My User List\"\n # }\n\n # else:\n # context={\n # \"title\": \"List\"\n # }\n queryset_list=Post.objects.active()\n today=timezone.now().date()\n if request.user.is_staff or request.user.is_superuser:\n queryset_list=Post.objects.all()\n\n query= request.GET.get(\"q\")\n if query:\n queryset_list=queryset_list.filter(\n Q(title__icontains=query) |\n Q(context__icontains=query) |\n Q(user__first_name__icontains=query) |\n Q(user__last_name__icontains=query) \n ).distinct()\n\n paginator = Paginator(queryset_list, 2) # Show 25 contacts per page\n page_request_var=\"cosita\"\n page = request.GET.get(page_request_var)\n try:\n queryset = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n queryset = paginator.page(1)\n except 
EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n queryset = paginator.page(paginator.num_pages)\n context= {\n \"page_request_var\":page_request_var,\n \"object_list\":queryset,\n \"title\": \"List\",\n \"today\": today\n }\n\n return render(request,\"post_list.html\",context)\n\n\n\ndef post_update(request, slug=None):\n if not request.user.is_staff or not request.user.is_superuser:\n raise Http404\n instance = get_object_or_404(Post,slug=slug)\n form=PostForm(request.POST or None, request.FILES or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n messages.success(request,\"Saved\")\n return HttpResponseRedirect(instance.get_absolute_url())\n context= {\n \"form\":form,\n \"title\":instance.title,\n \"obj\":instance\n }\n return render(request,\"post_form.html\",context)\n\ndef post_delete(request, id=None):\n if not request.user.is_staff or not request.user.is_superuser:\n raise Http404\n instance = get_object_or_404(Post,id=id)\n instance.delete()\n messages.success(request,\"Successfully Deleted\")\n return redirect(\"posts:List\")","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"621585208","text":"\"\"\"\nwikQuery.py: Wiki searches.\n\nCopyright (C) 2007,2008 Frank McIngvale\n\nContact: fmcingvale@gmail.com\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\"\"\"\n\nWikklyQueryAllFields = ['Name', 'Author', 'Content', 'Tags']\n\nclass WikklyQueryBase(object):\n\t\"\"\"\n\tAbstract base class. 
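# post_list above uses the standard Django pagination idiom: fall back to page 1
# on a non-integer page and to the last page when out of range. The same pattern
# extracted into a helper (Paginator and the exceptions as imported in the view):

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def get_page(queryset, page, per_page=2):
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)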
All query types are subclassed from this.\n\t\"\"\"\n\tdef fieldjoin(self, item, fields):\n\t\t\"Return combined text of the named fields.\"\n\t\tfrom wikklytext.store import tags_join\n\t\tj = u''\n\t\tif 'Name' in fields:\n\t\t\tj += u' ' + item.name\n\t\t\n\t\tif 'Author' in fields:\n\t\t\tj += u' ' + item.author\n\t\t\t\n\t\tif 'Content' in fields:\n\t\t\tj += u' ' + item.content\n\t\t\t\n\t\tif 'Tags' in fields:\n\t\t\tj += u' ' + tags_join(item.tag_list())\n\t\t\t\n\t\treturn j\n\t\t\nimport re\n\nclass WikklyQueryWords(WikklyQueryBase):\n\t\"\"\"\n\tQuery by performing a word search.\n\t\n\t'words' is the list of words.\n\tSet *ONE* of the 'op_' args to True:\n\t\top_and: Require ALL words to match.\n\t\top_or: Require ANY of the words to match.\n\t\t\n\t'no_case' = True/False to make search case-insensitive.\n\t\"\"\"\n\tdef __init__(self, words, op_and=False, op_or=False, no_case=True, fields=WikklyQueryAllFields):\n\t\tself.words = words\n\t\tself.fields = fields\n\t\tself.op_and = op_and\n\t\tself.op_or = op_or\n\t\tself.no_case = no_case\n\t\n\tdef explain(self):\n\t\t\"Return a text description of self as wikitext.\"\n\t\td = u''\n\t\tif self.op_and:\n\t\t\td += u\"Searched for ''all'' of the words: \"\n\t\telif self.op_or:\n\t\t\td += u\"Searched for ''any'' of the words: \"\n\t\telse:\n\t\t\treturn u'@@ERROR - Bad query@@'\n\t\t\t\n\t\twords = [('\"%s\"' % w) for w in self.words]\n\t\td += u\", \".join(words)\n\t\td += u\"
\"\n\t\t\n\t\tfields = [('\"%s\"' % f) for f in self.fields]\n\t\td += u\"In fields: %s
\" % (u\", \".join(fields))\n\t\t\n\t\tif self.no_case:\n\t\t\td += u\"//Case ''insensitive''//\"\n\t\telse:\n\t\t\td += u\"//Case ''sensitive''//\"\n\t\t\t\n\t\treturn d\n\t\t\n\tdef match(self, item):\n\t\tjoined = self.fieldjoin(item, self.fields)\n\t\tif self.no_case:\n\t\t\tjoined = joined.lower()\n\t\t\t\n\t\t#print \"WORD SEARCH in JOINED\",repr(joined)\n\t\t#print \"AND: %s, OR: %s\" % (str(self.op_and), str(self.op_or))\n\t\t\n\t\tif not self.op_and and not self.op_or:\n\t\t\treturn False # assumed fail if no-op given\n\t\t\t\n\t\tfor word in self.words:\n\t\t\tif self.no_case:\n\t\t\t\tword = word.lower()\n\t\t\t\t\n\t\t\t#print \"WORD\",word\n\t\t\tif self.op_and and joined.find(word) < 0:\n\t\t\t\t#print \"FAILED 'AND'\"\n\t\t\t\treturn False # one failed to match\n\t\t\telif self.op_or and joined.find(word) >= 0:\n\t\t\t\t#print \"PASSED 'OR'\"\n\t\t\t\treturn True # one matched\n\t\t\t\n\t\tif self.op_and:\n\t\t\t#print \"PASSED 'AND'\"\n\t\t\treturn True # all matched\n\t\telse: # only other option is op_or from logic above\n\t\t\t#print \"FAILED 'OR'\"\n\t\t\treturn False # none matched\n\t\t\t\nclass WikklyQueryRegex(WikklyQueryBase):\n\t\"\"\"\n\tQuery store using a regular expression.\n\t\n\t'regex' is the raw (uncompiled) regular expression.\n\t'fields' are the field names to include in the search.\n\t're_flags' are the 're.compile()' flags if you want to set your own.\n\t\"\"\"\n\tdef __init__(self, regex, fields=WikklyQueryAllFields, re_flags=re.M|re.I):\n\t\tself.regex = re.compile(regex, re_flags)\n\t\tself.fields = fields\n\t\t\n\tdef match(self, item):\n\t\tif self.regex.search(self.fieldjoin(item, self.fields)):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\t\t\n#------------------------------------------------------------\n# Internal API - used by various stores\n#------------------------------------------------------------\n\ndef generic_query_store(store, query):\n\t\"\"\"\n\tRun the specified query on the store, returning a list\n\tof matching items. 
This is for use by stores that don't\n\twant to provide a more optimized version.\n\t\"\"\"\n\treturn [item for item in store.getall() if query.match(item)]\n\t\n\t\n","sub_path":"venv/Lib/site-packages/wikklytext/store/wikQuery.py","file_name":"wikQuery.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"125210843","text":"'''\r\n@author: Faizan-Uni-Stuttgart\r\n\r\nNov 12, 2020\r\n\r\n7:00:48 PM\r\n\r\n'''\r\nimport os\r\nimport time\r\nimport timeit\r\nfrom pathlib import Path\r\n\r\nfrom spinterps import Extract\r\n\r\nDEBUG_FLAG = True\r\n\r\n\r\ndef main():\r\n\r\n main_dir = Path(r'P:\\Synchronize\\IWS\\Testings\\fourtrans_practice\\multisite_phs_spec_corr\\precipitation_kriging\\orig')\r\n os.chdir(main_dir)\r\n\r\n path_to_shp = r'P:\\Synchronize\\IWS\\QGIS_Neckar\\raster\\taudem_out_spate_rockenau\\watersheds_all.shp'\r\n\r\n label_field = r'DN'\r\n\r\n path_to_ras = r'precipitation_kriging_1km_orig.nc'\r\n input_ras_type = 'nc'\r\n\r\n# path_to_ras = r'P:\\Synchronize\\IWS\\Colleagues_Students\\Mischa\\lulc_geohyd_ratio_rasters\\lower_de_gauss_z3_1km_hydrogeol_einheit_nr_hydmod_lulc_ratios.tif'\r\n# input_ras_type = 'gtiff'\r\n\r\n nc_x_crds_label = 'X'\r\n nc_y_crds_label = 'Y'\r\n nc_variable_labels = ['OK']\r\n nc_time_label = 'time'\r\n\r\n src_epsg = None\r\n dst_epsg = None\r\n\r\n# src_epsg = 4326\r\n# dst_epsg = 31467\r\n\r\n# main_dir = Path(r'P:\\Downloads\\spinterp_2d_nc_crds_test')\r\n# os.chdir(main_dir)\r\n#\r\n# path_to_shp = r'01Small.shp'\r\n#\r\n# label_field = r'Id'\r\n#\r\n# path_to_ras = r'pr_SAM-44_ICHEC-EC-EARTH_historical_r12i1p1_SMHI-RCA4_v3_day_19810101-19851231.nc'\r\n# input_ras_type = 'nc'\r\n#\r\n# nc_x_crds_label = 'lon'\r\n# nc_y_crds_label = 'lat'\r\n# nc_variable_labels = ['pr']\r\n# nc_time_label = 'time'\r\n\r\n path_to_output = Path(r'neckar_ppt_1991_1991.h5')\r\n# path_to_output = 'lower_de_gauss_z3_1km_hydrogeol_einheit_nr_hydmod_lulc_ratios.h5'\r\n\r\n Ext = Extract(True)\r\n\r\n res = None\r\n\r\n if input_ras_type == 'gtiff':\r\n res = Ext.extract_from_geotiff(\r\n path_to_shp,\r\n label_field,\r\n path_to_ras,\r\n path_to_output,\r\n src_epsg,\r\n dst_epsg)\r\n\r\n elif input_ras_type == 'nc':\r\n res = Ext.extract_from_netCDF(\r\n path_to_shp,\r\n label_field,\r\n path_to_ras,\r\n path_to_output,\r\n nc_x_crds_label,\r\n nc_y_crds_label,\r\n nc_variable_labels,\r\n nc_time_label,\r\n src_epsg,\r\n dst_epsg)\r\n\r\n else:\r\n raise NotImplementedError\r\n\r\n print('\\n')\r\n print('res:', res)\r\n\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n _save_log_ = False\r\n if _save_log_:\r\n from datetime import datetime\r\n from std_logger import StdFileLoggerCtrl\r\n\r\n # save all console activity to out_log_file\r\n out_log_file = os.path.join(\r\n r'P:\\Synchronize\\python_script_logs\\\\%s_log_%s.log' % (\r\n os.path.basename(__file__),\r\n datetime.now().strftime('%Y%m%d%H%M%S')))\r\n\r\n log_link = StdFileLoggerCtrl(out_log_file)\r\n\r\n print('#### Started on %s ####\\n' % time.asctime())\r\n START = timeit.default_timer()\r\n\r\n #==========================================================================\r\n # When in post_mortem:\r\n # 1. \"where\" to show the stack\r\n # 2. \"up\" move the stack up to an older frame\r\n # 3. \"down\" move the stack down to a newer frame\r\n # 4. 
\"interact\" start an interactive interpreter\r\n #==========================================================================\r\n\r\n if DEBUG_FLAG:\r\n try:\r\n main()\r\n\r\n except:\r\n import pdb\r\n pdb.post_mortem()\r\n\r\n else:\r\n main()\r\n\r\n STOP = timeit.default_timer()\r\n print(('\\n#### Done with everything on %s.\\nTotal run time was'\r\n ' about %0.4f seconds ####' % (time.asctime(), STOP - START)))\r\n\r\n if _save_log_:\r\n log_link.stop()\r\n","sub_path":"ft_interp/v4/04_polygon_extraction.py","file_name":"04_polygon_extraction.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"451663516","text":"# coding: utf-8\n\n\"\"\"\n Alfresco Content Services REST API\n\n **Core API** Provides access to the core features of Alfresco Content Services. # noqa: E501\n\n The version of the OpenAPI document: 1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom openapi_client.configuration import Configuration\n\n\nclass PersonNetwork(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'id': 'str',\n 'home_network': 'bool',\n 'is_enabled': 'bool',\n 'created_at': 'datetime',\n 'paid_network': 'bool',\n 'subscription_level': 'str',\n 'quotas': 'list[NetworkQuota]'\n }\n\n attribute_map = {\n 'id': 'id',\n 'home_network': 'homeNetwork',\n 'is_enabled': 'isEnabled',\n 'created_at': 'createdAt',\n 'paid_network': 'paidNetwork',\n 'subscription_level': 'subscriptionLevel',\n 'quotas': 'quotas'\n }\n\n def __init__(self, id=None, home_network=None, is_enabled=None, created_at=None, paid_network=None, subscription_level=None, quotas=None, local_vars_configuration=None): # noqa: E501\n \"\"\"PersonNetwork - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._id = None\n self._home_network = None\n self._is_enabled = None\n self._created_at = None\n self._paid_network = None\n self._subscription_level = None\n self._quotas = None\n self.discriminator = None\n\n self.id = id\n if home_network is not None:\n self.home_network = home_network\n self.is_enabled = is_enabled\n if created_at is not None:\n self.created_at = created_at\n if paid_network is not None:\n self.paid_network = paid_network\n if subscription_level is not None:\n self.subscription_level = subscription_level\n if quotas is not None:\n self.quotas = quotas\n\n @property\n def id(self):\n \"\"\"Gets the id of this PersonNetwork. # noqa: E501\n\n This network's unique id # noqa: E501\n\n :return: The id of this PersonNetwork. # noqa: E501\n :rtype: str\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this PersonNetwork.\n\n This network's unique id # noqa: E501\n\n :param id: The id of this PersonNetwork. 
# noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def home_network(self):\n \"\"\"Gets the home_network of this PersonNetwork. # noqa: E501\n\n Is this the home network? # noqa: E501\n\n :return: The home_network of this PersonNetwork. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._home_network\n\n @home_network.setter\n def home_network(self, home_network):\n \"\"\"Sets the home_network of this PersonNetwork.\n\n Is this the home network? # noqa: E501\n\n :param home_network: The home_network of this PersonNetwork. # noqa: E501\n :type: bool\n \"\"\"\n\n self._home_network = home_network\n\n @property\n def is_enabled(self):\n \"\"\"Gets the is_enabled of this PersonNetwork. # noqa: E501\n\n\n :return: The is_enabled of this PersonNetwork. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._is_enabled\n\n @is_enabled.setter\n def is_enabled(self, is_enabled):\n \"\"\"Sets the is_enabled of this PersonNetwork.\n\n\n :param is_enabled: The is_enabled of this PersonNetwork. # noqa: E501\n :type: bool\n \"\"\"\n if self.local_vars_configuration.client_side_validation and is_enabled is None: # noqa: E501\n raise ValueError(\"Invalid value for `is_enabled`, must not be `None`\") # noqa: E501\n\n self._is_enabled = is_enabled\n\n @property\n def created_at(self):\n \"\"\"Gets the created_at of this PersonNetwork. # noqa: E501\n\n\n :return: The created_at of this PersonNetwork. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._created_at\n\n @created_at.setter\n def created_at(self, created_at):\n \"\"\"Sets the created_at of this PersonNetwork.\n\n\n :param created_at: The created_at of this PersonNetwork. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._created_at = created_at\n\n @property\n def paid_network(self):\n \"\"\"Gets the paid_network of this PersonNetwork. # noqa: E501\n\n\n :return: The paid_network of this PersonNetwork. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._paid_network\n\n @paid_network.setter\n def paid_network(self, paid_network):\n \"\"\"Sets the paid_network of this PersonNetwork.\n\n\n :param paid_network: The paid_network of this PersonNetwork. # noqa: E501\n :type: bool\n \"\"\"\n\n self._paid_network = paid_network\n\n @property\n def subscription_level(self):\n \"\"\"Gets the subscription_level of this PersonNetwork. # noqa: E501\n\n\n :return: The subscription_level of this PersonNetwork. # noqa: E501\n :rtype: str\n \"\"\"\n return self._subscription_level\n\n @subscription_level.setter\n def subscription_level(self, subscription_level):\n \"\"\"Sets the subscription_level of this PersonNetwork.\n\n\n :param subscription_level: The subscription_level of this PersonNetwork. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"Free\", \"Standard\", \"Enterprise\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and subscription_level not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `subscription_level` ({0}), must be one of {1}\" # noqa: E501\n .format(subscription_level, allowed_values)\n )\n\n self._subscription_level = subscription_level\n\n @property\n def quotas(self):\n \"\"\"Gets the quotas of this PersonNetwork. # noqa: E501\n\n\n :return: The quotas of this PersonNetwork. 
# noqa: E501\n :rtype: list[NetworkQuota]\n \"\"\"\n return self._quotas\n\n @quotas.setter\n def quotas(self, quotas):\n \"\"\"Sets the quotas of this PersonNetwork.\n\n\n :param quotas: The quotas of this PersonNetwork. # noqa: E501\n :type: list[NetworkQuota]\n \"\"\"\n\n self._quotas = quotas\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PersonNetwork):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, PersonNetwork):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"openapi_client/models/person_network.py","file_name":"person_network.py","file_ext":"py","file_size_in_byte":8552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"376439108","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 16 14:34:46 2018\n\n@author: zarito\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# Imports\nimport numpy as np\nimport tensorflow as tf\nimport time\nimport h5py\nimport random as rand\nimport sys\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n#Shuffle order of training examples\nindices = np.load(\"work/train_indices.npy\")\ntest_indices = np.load(\"work/test_indices.npy\")\ntrain_indices = indices[2048:]\nvalidation_indices = indices[0:2048]\n\n#Constants\nTRAINING_EPOCHS = 1000 #How many passes over the training data during training\nBATCH_SIZE = int(1)\nN_BATCHES = int(sys.argv[2]) #Number of batches before applying gradient\nEPOCH = int(len(train_indices)) #5*2^11, 5^2*2 Training steps per epoch\nDECAY = 0.5 \nSUMMARIES_PER_EPOCH = int(64)\nCKPTS_PER_EPOCH = 16\nFILTERS = int(sys.argv[4])\nEPOCH_STEPS = int(EPOCH/(BATCH_SIZE*N_BATCHES))\nDECAY_STEPS = int(EPOCH_STEPS/2)\nTRAINING_STEPS = int(TRAINING_EPOCHS*EPOCH_STEPS)\nSUMMARY_STEPS = int(EPOCH_STEPS/SUMMARIES_PER_EPOCH)\nCKPT_STEPS = int(EPOCH_STEPS/CKPTS_PER_EPOCH)\nCONCAT = int(sys.argv[3])\nLOGDIR = \"work/unet14_8/\" #Training directory\n\n\nH5KEY = \"/entry_1/image_1/data\"\nHEIGHT = 388\nWIDTH = 370\nNORM_FACTOR = 110657\nf = h5py.File(name=\"/scratch/fhgfs/cnettel/bkg/cxi73013-r0399.cxi\", mode=\"r\")\nfkey = f[H5KEY] #(12000, 390, 390)\nh5dataset = np.array(fkey[:,0:388,9:379], dtype=np.float32)\n\n\nmean_image = np.load(\"work/mean_old_set.npy\") #Scalar mean of images\nstd_image = np.load(\"work/std_old_set.npy\") #Scalar std of images\nmean_image_153 = np.load(\"work/mean_image_old_set.npy\") #Mean image (HEIGHT, WIDTH)\n\n#Load linear fit\nX_load = 
np.load(\"work/X10_lr_3.4E-01_lmbda_2.9E-09_lmbda2_7.0E-10.npy\")\nB_load = np.load(\"work/B10_lr_3.4E-01_lmbda_2.9E-09_lmbda2_7.0E-10.npy\")\nk_load = np.load(\"work/k10_lr_3.4E-01_lmbda_2.9E-09_lmbda2_7.0E-10.npy\")\n\nDIV_FACTOR = std_image #Used \n\n\ndef main():\n print(\"Saving to \" + LOGDIR)\n ae_model()\n\ndef batch_read(indices):\n batch_images = f[H5KEY][indices].astype(\"float32\")\n return batch_images\n\n#Tensorflow model \ndef ae_model():\n config=tf.ConfigProto(log_device_placement=True)\n tf.reset_default_graph()\n sess = tf.Session(config=config)\n \n #Randomly generate hyperparameters\n #Learning rate\n learning_rate_pow = tf.Variable(tf.random_uniform([], minval=-4, maxval=-2.5),trainable=False)\n #Lambda - regularization on image bias\n lmbda_pow = tf.Variable(tf.random_uniform([], minval=-2, maxval=1),trainable=False)\n \n #Hyper parameters for adam optimizer\n beta1_pow = tf.Variable(tf.random_uniform([], minval=-2.5, maxval=-0.5),trainable=False)\n beta2_pow = tf.Variable(tf.random_uniform([], minval=-5, maxval=-2.5),trainable=False)\n epsilon_pow = tf.Variable(tf.random_uniform([], minval=-9, maxval=-7),trainable=False)\n \n learning_rate = tf.pow(tf.constant(10.0), learning_rate_pow)\n beta1=1 - tf.pow(tf.constant(10.0), beta1_pow)\n beta2=1 - tf.pow(tf.constant(10.0), beta2_pow)\n epsilon=tf.pow(tf.constant(10.0), epsilon_pow)\n lmbda = tf.pow(tf.constant(10.0), lmbda_pow)\n \n ##############################\n #Creating diffraction image\n sample = tf.abs(tf.random_normal([40,40]))\n paddings = tf.constant([[1004,1004],[1004,1004]])\n sample_padded = tf.pad(sample, paddings)\n sample_fft = tf.fft2d(tf.cast(sample_padded, dtype=tf.complex64))\n intensity = tf.abs(tf.truncated_normal([],stddev=0.5))\n sample_fft = ( tf.pow(tf.cast(tf.abs(sample_fft),dtype=tf.float32),2)/2429.0)*intensity\n sample_fft = tf.expand_dims(sample_fft, 0)\n sample_fft = tf.expand_dims(sample_fft, -1)\n tf.summary.image(\"sample_fft_pow\", sample_fft, max_outputs=3)\n \n #FFT shift\n q1 = tf.slice(sample_fft, [0,0,1024,0], [1,1024,1024,1])\n q2 = tf.slice(sample_fft, [0,0,0,0], [1,1024,1024,1])\n q3 = tf.slice(sample_fft, [0,1024,0,0], [1,1024,1024,1])\n q4 = tf.slice(sample_fft, [0,1024,1024,0], [1,1024,1024,1])\n \n sub_concat1 = tf.concat([q4, q1], 1)\n sub_concat2 = tf.concat([q3, q2], 1)\n sample_fft_shift = tf.concat([sub_concat1, sub_concat2], 2)\n tf.summary.image(\"sample_fft_shift\", sample_fft_shift, max_outputs=3)\n tf.summary.scalar(\"sample_max\", tf.reduce_max(sample_fft_shift))\n tf.summary.scalar(\"sample_mean\", tf.reduce_mean(sample_fft_shift))\n x_shift = tf.cast(tf.round(tf.truncated_normal([],stddev=5.0)), dtype=tf.int64)\n y_shift = tf.cast(tf.round(tf.truncated_normal([],stddev=5.0)), dtype=tf.int64)\n sample_crop = tf.slice(sample_fft_shift, [0,1024 - int(388/2) + 13 + y_shift, 1024 - int(370/2) + 15 + x_shift,0], [1,388,370,1])\n tf.summary.image(\"sample_crop\", sample_crop, max_outputs=3)\n tf.summary.scalar(\"sample_crop_max\", tf.reduce_max(sample_crop))\n tf.summary.scalar(\"sample_crop_mean\", tf.reduce_mean(sample_crop))\n \n sample_crop_psn_int = tf.random_poisson(sample_crop, [])\n sample_crop_psn = tf.multiply(sample_crop_psn_int, 2*tf.random_normal([1,HEIGHT,WIDTH,1])+25)\n tf.summary.image(\"sample_crop_psn\", sample_crop_psn, max_outputs=3)\n tf.summary.scalar(\"sample_crop_psn_max\", tf.reduce_max(sample_crop_psn))\n tf.summary.scalar(\"sample_crop_psn_mean\", tf.reduce_mean(sample_crop_psn))\n ###################################\n \n #Pixel weigting 
matrix\n xpts, ypts = np.meshgrid(np.linspace(-1,1,450), np.linspace(-1,1,450))\n dpts = np.sqrt(xpts*xpts+ypts*ypts)\n sigma, mu = 0.5, 0.0\n loss_weights = np.exp(-( (dpts-mu)**2 / ( 2.0 * sigma**2 ) ) )\n y_disp = 11\n x_disp = 15\n loss_weights = loss_weights[(31 + y_disp):(31 + y_disp + 388), (40 + x_disp):(40 + x_disp + 370)]\n loss_weights = np.expand_dims(loss_weights, 0)\n loss_weights = np.expand_dims(loss_weights, 3)\n \n #Create circle part of mask\n a1, b1 = 181, 170\n n1,n2 = 388,370\n r = 78\n y,x = np.ogrid[-a1:n1-a1, -b1:n2-b1]\n mask = x*x + y*y > r*r\n \n #Masking \"bad\" pixels\n #Abnornmal mean >1900 far from high intensity\n mask[148:211,:] = False\n mask[274,80] = False\n mask[103, 254] = False\n mask[123, 60] = False\n mask[290, 204] = False\n mask[142, 68] = False\n mask[268, 145] = False\n mask[102, 254] = False\n mask[291, 204] = False\n mask[290, 205] = False\n mask[268, 146] = False\n mask[128, 368] = False\n \n #Abnormal std relative to adjacent pixels\n mask[27, 188] = False\n mask[112, 12] = False\n mask[112, 12] = False\n mask[315, 194] = False\n mask[328, 194] = False\n mask[336, 187] = False\n mask[377, 189] = False\n mask[315, 186] = False\n mask[327, 189] = False\n mask[350, 187] = False\n mask[374, 188] = False\n mask[336, 202] = False\n mask[367, 186] = False\n mask[310, 187] = False\n mask[345, 185] = False\n mask[342, 200] = False\n mask[351, 188] = False\n mask = mask.reshape((1,388,370,1))\n \n n_batches = tf.placeholder(tf.float32, shape=(), name=\"n_batches\")\n photon_sum_in = tf.reduce_sum(sample_crop_psn_int*mask)\n tf.summary.scalar(\"photon_count\", photon_sum_in) \n ####ENCODER####\n ###################################\n\n #Clean image\n with tf.variable_scope(\"clean\"):\n y = tf.placeholder(tf.float32, shape=[BATCH_SIZE,HEIGHT,WIDTH], name=\"y\")\n \n y_im = tf.reshape(y, [BATCH_SIZE, HEIGHT, WIDTH, 1])*mask\n y_image = (y_im - mean_image)/std_image\n y_image = y_image*mask\n \n tf.summary.histogram(\"clean\", y_image)\n \n #Clean image with added diffraction\n with tf.variable_scope(\"input\"):\n x_image = ((y_im + sample_crop_psn) - mean_image)/std_image\n x_image=x_image*mask\n tf.summary.histogram(\"input\", x_image)\n\n #Calculate bias\n y_image_bias = tf.reduce_sum(y_image)/NORM_FACTOR\n\n #Add padding for nicer up and downsampling\n pu=30\n pd=30\n pl=39\n pr=39\n paddings = tf.constant([[0,0], [pu,pd], [pl,pr], [0,0]])\n x_image_pad = tf.pad(x_image,paddings)\n \n mean_gt = tf.constant(mean_image_153, dtype=tf.float32, shape=[1,HEIGHT,WIDTH,1])\n X_fit = tf.constant(X_load, dtype=tf.float32, shape=[1,HEIGHT,WIDTH,1])\n B_fit = tf.constant(B_load, dtype=tf.float32, shape=[1,HEIGHT,WIDTH,1])\n X_sum = tf.reduce_sum(X_fit)\n B_sum = tf.reduce_sum(B_fit)\n \n sample_k = tf.placeholder(tf.float32, shape=(1,1), name=\"sample_k\")\n smpl_k = tf.reshape(sample_k, ())\n \n #Calculating fit using known background data\n fit1 = X_fit*smpl_k + B_fit\n fit1_out = tf.maximum(tf.round((y_im + sample_crop_psn - fit1)/25), 0)*mask\n tf.summary.image(\"fit1_out\", fit1_out, max_outputs=3)\n diff_fit1 = tf.abs(fit1_out - sample_crop_psn_int)*mask\n diff_photons_fit1_pos=tf.maximum(diff_fit1, 0)\n diff_photons_fit1_neg=tf.abs(tf.minimum(diff_fit1, 0))\n diff_photons_fit1 = tf.concat([diff_photons_fit1_neg,diff_photons_fit1_pos, tf.zeros_like(diff_photons_fit1_neg)], axis=3)\n dp_sum_fit1 = tf.reduce_sum(diff_photons_fit1)\n tf.summary.scalar(\"diff_photons_out_fit1\", dp_sum_fit1)\n tf.summary.scalar(\"dpo_over_photons_in_fit1\", 
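# The np.ogrid expression above builds the circular detector mask without
# materialising full coordinate grids: ogrid yields a broadcastable column and
# row vector, so x*x + y*y expands lazily over the whole image. The trick in
# isolation (True outside the circle, as in the model):

import numpy as np

def circle_mask(h, w, cy, cx, r):
    y, x = np.ogrid[-cy:h - cy, -cx:w - cx]
    return x * x + y * y > r * r

m = circle_mask(5, 5, 2, 2, 1)
assert not m[2, 2]  # centre is inside the circle
assert m[0, 0]      # corner is outside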
dp_sum_fit1/photon_sum_in)\n tf.summary.image(\"diff_photons_fit1_color\", diff_photons_fit1*mask, max_outputs=3) \n \n #Calculating fit without known background\n k_fit2 = (tf.reduce_sum((y_im + sample_crop_psn)*mask) - B_sum)/X_sum\n fit2 = k_fit2*X_fit + B_fit\n fit2_out = tf.maximum(tf.round((y_im + sample_crop_psn - fit2)/25), 0)*mask\n tf.summary.image(\"fit2_out\", fit2_out, max_outputs=3)\n diff_fit2 = tf.abs(fit2_out - sample_crop_psn_int)*mask\n diff_photons_fit2_pos=tf.maximum(diff_fit2, 0)\n diff_photons_fit2_neg=tf.abs(tf.minimum(diff_fit2, 0))\n diff_photons_fit2 = tf.concat([diff_photons_fit2_neg,diff_photons_fit2_pos, tf.zeros_like(diff_photons_fit2_neg)], axis=3)\n dp_sum_fit2 = tf.reduce_sum(diff_photons_fit2)\n tf.summary.scalar(\"diff_photons_out_fit2\", dp_sum_fit2)\n tf.summary.image(\"diff_photons_fit2_color\", diff_photons_fit2*mask, max_outputs=3) \n \n #Calculating ADU sums of different methods for summaries\n sum_actual = tf.reduce_sum(y_im)\n sum_gt = tf.reduce_sum(mean_image_153)\n sum_fit1 = tf.reduce_sum(fit1*mask)\n tf.summary.scalar(\"sum_actual\", sum_actual)\n tf.summary.scalar(\"sum_gt\", sum_gt)\n tf.summary.scalar(\"sum_fit1\", sum_fit1)\n\n\n mean_gt_bias = tf.reduce_sum((mean_gt-mean_image)/std_image)/NORM_FACTOR\n\n \n #######################################################################\n #AUTOENCODER\n \n i = 1\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc1 = loc_layer(inputs=x_image_pad, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc1)\n i = 2\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc2 = loc_layer(inputs=loc1, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc2)\n \n pool1 = tf.layers.average_pooling2d(loc2, pool_size=2,strides=2,padding='same')\n \n#################################\n \n i = 3\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc3 = loc_layer(inputs=pool1, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc3)\n i = 4\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc4 = loc_layer(inputs=loc3, name=str(i), n_channels=FILTERS) + pool1\n tf.summary.histogram(\"loc\"+str(i), loc4)\n \n pool2 = tf.layers.average_pooling2d(loc4, pool_size=2,strides=2,padding='same')\n \n################################# \n\n i = 5\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc5 = loc_layer(inputs=pool2, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc5)\n i = 6\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc6 = loc_layer(inputs=loc5, name=str(i), n_channels=FILTERS) + pool2\n tf.summary.histogram(\"loc\"+str(i), loc6)\n \n pool3 = tf.layers.average_pooling2d(loc6, pool_size=2,strides=2,padding='same')\n \n################################# \n \n i = 7\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc7 = loc_layer(inputs=pool3, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc7)\n i = 8\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc8 = loc_layer(inputs=loc7, name=str(i), n_channels=FILTERS) + pool3\n tf.summary.histogram(\"loc\"+str(i), loc8)\n \n pool4 = tf.layers.average_pooling2d(loc8, pool_size=2,strides=2,padding='same')\n \n################################# \n \n i = 9\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc9 = loc_layer(inputs=pool4, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc9)\n i = 10\n 
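# fit2 above recovers the background scale without per-shot calibration by
# matching total ADU: for the model k*X + B, summing over pixels gives
# k = (sum(image) - sum(B)) / sum(X). A NumPy sanity check on noiseless
# synthetic data:

import numpy as np

rng = np.random.RandomState(0)
X, B = rng.rand(8, 8), rng.rand(8, 8)
image = 3.7 * X + B
k_est = (image.sum() - B.sum()) / X.sum()
assert abs(k_est - 3.7) < 1e-9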
with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc10 = loc_layer(inputs=loc9, name=str(i), n_channels=FILTERS) + pool4\n tf.summary.histogram(\"loc\"+str(i), loc10)\n \n pool5 = tf.layers.average_pooling2d(loc10, pool_size=2,strides=2,padding='same')\n \n################################# \n i = 11\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc11 = loc_layer(inputs=pool5, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc11)\n i = 12\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc12 = loc_layer(inputs=loc11, name=str(i), n_channels=FILTERS) + pool5\n tf.summary.histogram(\"loc\"+str(i), loc12)\n \n pool6 = tf.layers.average_pooling2d(loc12, pool_size=2,strides=2,padding='same')\n \n#################################\n################################# \n \n \n i = 13\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc13 = loc_layer(inputs=pool6, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc13)\n i = 14\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc14 = loc_layer(inputs=loc13, name=str(i), n_channels=FILTERS) + pool6\n tf.summary.histogram(\"loc\"+str(i), loc14)\n \n upsample1_ = tf.image.resize_images(loc14, size=(int(448/2**5),int(448/2**5)), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n#################################\n if CONCAT == 0:\n upsample1 = upsample1_ + loc12\n else:\n upsample1 = tf.concat([upsample1_,loc12],axis=3)\n\n i = 15\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc15 = loc_layer(inputs=upsample1, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc15)\n i = 16\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc16 = loc_layer(inputs=loc15, name=str(i), n_channels=FILTERS) + upsample1_\n tf.summary.histogram(\"loc\"+str(i), loc16)\n \n upsample2_ = tf.image.resize_images(loc16, size=(int(448/2**4),int(448/2**4)), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n################################# \n if CONCAT == 0:\n upsample2 = upsample2_ + loc10\n else:\n upsample2 = tf.concat([upsample2_,loc10],axis=3)\n\n i = 17\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc17 = loc_layer(inputs=upsample2, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc17)\n i = 18\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc18 = loc_layer(inputs=loc17, name=str(i), n_channels=FILTERS) + upsample2_\n tf.summary.histogram(\"loc\"+str(i), loc18)\n \n upsample3_ = tf.image.resize_images(loc18, size=(int(448/2**3),int(448/2**3)), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n################################# \n if CONCAT == 0:\n upsample3 = upsample3_ + loc8\n else:\n upsample3 = tf.concat([upsample3_,loc8],axis=3)\n\n i = 19\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc19 = loc_layer(inputs=upsample3, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc19)\n i = 20\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc20 = loc_layer(inputs=loc19, name=str(i), n_channels=FILTERS) + upsample3_\n tf.summary.histogram(\"loc\"+str(i), loc20)\n \n upsample4_ = tf.image.resize_images(loc20, size=(int(448/2**2),int(448/2**2)), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n################################# \n if CONCAT == 0:\n upsample4 = upsample4_ + loc6\n else:\n upsample4 = tf.concat([upsample4_,loc6],axis=3)\n\n i = 21\n with 
tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc21 = loc_layer(inputs=upsample4, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc21)\n i = 22\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc22 = loc_layer(inputs=loc21, name=str(i), n_channels=FILTERS) + upsample4_\n tf.summary.histogram(\"loc\"+str(i), loc22)\n \n upsample5_ = tf.image.resize_images(loc22, size=(int(448/2),int(448/2)), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n################################# \n if CONCAT == 0:\n upsample5 = upsample5_ + loc4\n else:\n upsample5 = tf.concat([upsample5_,loc4],axis=3)\n\n i = 23\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc23 = loc_layer(inputs=upsample5, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc23)\n i = 24\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc24 = loc_layer(inputs=loc23, name=str(i), n_channels=FILTERS) + upsample5_\n tf.summary.histogram(\"loc\"+str(i), loc24)\n \n upsample6_ = tf.image.resize_images(loc24, size=(448,448), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n \n################################# \n if CONCAT == 0:\n upsample6 = upsample6_ + loc2\n else:\n upsample6 = tf.concat([upsample6_,loc2],axis=3)\n \n i = 25\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n loc25 = loc_layer(inputs=upsample6, name=str(i), n_channels=FILTERS)\n tf.summary.histogram(\"loc\"+str(i), loc25)\n \n i = 26\n with tf.variable_scope(\"loc\"+str(i), reuse=tf.AUTO_REUSE):\n decoded = loc_layer(inputs=loc25, name=str(i), n_channels=1, activation=False)\n tf.summary.histogram(\"loc\"+str(i), decoded) \n \n \n decoded = tf.slice(decoded, [0, pu, pl, 0], [-1, HEIGHT, WIDTH, -1])\n decoded = decoded*mask\n\n decoded_bias = tf.reduce_sum(decoded)/NORM_FACTOR\n \n \n ############################################################################\n #Creating various summaries\n \n tf.summary.image(\"input\", x_image, max_outputs=3)\n tf.summary.image(\"output\", decoded, max_outputs=3)\n \n input_restored = (y_image*std_image+mean_image)*mask\n output_restored = (decoded*std_image+mean_image)*mask\n output_psn = tf.maximum(((y_im + sample_crop_psn) - output_restored)*mask,0)\n output_mean_psn = tf.maximum(((y_im + sample_crop_psn) - mean_gt)*mask,0)\n tf.summary.image(\"sample_crop_psn_out\", output_psn*mask, max_outputs=3)\n tf.summary.image(\"sample_crop_psn_out_gt\", output_mean_psn*mask, max_outputs=3)\n\n #photons\n tf.summary.image(\"photons_in\", sample_crop_psn_int*mask, max_outputs=3)\n tf.summary.image(\"photons_out\", tf.round(output_psn/25)*mask, max_outputs=3)\n tf.summary.image(\"photons_out_gt\", tf.round(output_mean_psn/25)*mask, max_outputs=3)\n tf.summary.image(\"photons_out_fit1\", fit1_out, max_outputs=3)\n tf.summary.image(\"photons_out_fit2\", fit2_out, max_outputs=3)\n \n \n dpo=tf.round(output_psn/25)-sample_crop_psn_int\n dpo_abs_3=tf.abs(tf.layers.average_pooling2d(dpo*mask, (3,3),(3,3)))\n dpo_abs_9=tf.abs(tf.layers.average_pooling2d(dpo*mask, (9,9),(9,9)))\n tf.summary.image(\"dpo_abs_3\", dpo_abs_3, max_outputs=3)\n tf.summary.image(\"dpo_abs_9\", dpo_abs_9, max_outputs=3)\n tf.summary.scalar(\"dpo_abs_3\", tf.reduce_sum(dpo_abs_3))\n tf.summary.scalar(\"dpo_abs_9\", tf.reduce_sum(dpo_abs_9))\n diff_photons_out_pos=tf.maximum(dpo, 0)*mask\n diff_photons_out_neg=tf.abs(tf.minimum(dpo, 0)*mask)\n diff_photons_out = tf.concat([diff_photons_out_neg,diff_photons_out_pos, tf.zeros_like(diff_photons_out_neg)], axis=3)\n 
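# Each decoder stage above merges the upsampled tensor with its encoder
# counterpart either by addition (CONCAT == 0, residual style) or by channel
# concatenation (classic U-Net). The shape consequence, shown with NumPy
# stand-ins for the feature maps (channels-last, as in the model):

import numpy as np

up = np.zeros((1, 56, 56, 32))    # upsampled decoder features
skip = np.zeros((1, 56, 56, 32))  # matching encoder features
assert (up + skip).shape == (1, 56, 56, 32)                         # add keeps channels
assert np.concatenate([up, skip], axis=3).shape == (1, 56, 56, 64)  # concat doubles them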
dp_sum=tf.reduce_sum(diff_photons_out)\n tf.summary.scalar(\"dpo_over_photons_in\", dp_sum/photon_sum_in)\n tf.summary.image(\"diff_photons_out_color\", diff_photons_out*mask, max_outputs=3)\n tf.summary.scalar(\"diff_photons_out\", dp_sum)\n \n dpo_gt = tf.round(output_mean_psn/25) - sample_crop_psn_int\n dpo_abs_3_gt=tf.abs(tf.layers.average_pooling2d(dpo_gt*mask, (3,3),(3,3)))\n dpo_abs_9_gt=tf.abs(tf.layers.average_pooling2d(dpo_gt*mask, (9,9),(9,9)))\n tf.summary.image(\"dpo_abs_3_gt\", dpo_abs_3_gt, max_outputs=3)\n tf.summary.image(\"dpo_abs_9_gt\", dpo_abs_9_gt, max_outputs=3)\n tf.summary.scalar(\"dpo_abs_3_gt\", tf.reduce_sum(dpo_abs_3_gt))\n tf.summary.scalar(\"dpo_abs_9_gt\", tf.reduce_sum(dpo_abs_9_gt))\n diff_photons_out_pos_gt=tf.maximum(dpo_gt, 0)*mask\n diff_photons_out_neg_gt=tf.abs(tf.minimum(dpo_gt, 0)*mask)\n diff_photons_out_gt = tf.concat([diff_photons_out_neg_gt,diff_photons_out_pos_gt, tf.zeros_like(diff_photons_out_neg_gt)], axis=3)\n dp_sum_gt=tf.reduce_sum(diff_photons_out_gt)\n tf.summary.scalar(\"dpo_over_photons_in_gt\", dp_sum_gt/photon_sum_in)\n tf.summary.image(\"diff_photons_out_gt_color\", diff_photons_out_gt, max_outputs=3)\n tf.summary.scalar(\"diff_photons_out_gt\", dp_sum_gt)\n tf.summary.scalar(\"diff_fit1-diff_gt\", dp_sum_fit1 - dp_sum_gt)\n tf.summary.scalar(\"diff_photons_out_delta\", dp_sum_gt-dp_sum)\n\n #############################################################################\n \n with tf.name_scope(\"store_photon_diffs\"):\n cum_dp_sum = tf.Variable(tf.zeros_like(dp_sum), trainable=False)\n zero_dp_sum = cum_dp_sum.assign(tf.zeros_like(dp_sum))\n accum_dp_sum = cum_dp_sum.assign_add(dp_sum/n_batches)\n tf.summary.scalar(\"cum_dp_sum\", cum_dp_sum)\n with tf.name_scope(\"store_photon_diffs_gt\"):\n cum_dp_sum_gt = tf.Variable(tf.zeros_like(dp_sum_gt), trainable=False)\n zero_dp_sum_gt = cum_dp_sum_gt.assign(tf.zeros_like(dp_sum_gt))\n accum_dp_sum_gt = cum_dp_sum_gt.assign_add(dp_sum_gt/n_batches)\n with tf.name_scope(\"store_photon_diffs_fit1\"):\n cum_dp_sum_fit1 = tf.Variable(tf.zeros_like(dp_sum_gt), trainable=False)\n zero_dp_sum_fit1 = cum_dp_sum_fit1.assign(tf.zeros_like(dp_sum_gt))\n accum_dp_sum_fit1 = cum_dp_sum_fit1.assign_add(dp_sum_fit1/n_batches)\n tf.summary.scalar(\"cum_dp_sum_gt\", cum_dp_sum_gt)\n tf.summary.scalar(\"cum_dp_sum_fit1\", cum_dp_sum_fit1)\n tf.summary.scalar(\"cum_dp_sum_delta\", cum_dp_sum_gt-cum_dp_sum)\n \n \n diff_image_nonabs = output_restored - input_restored\n tf.summary.histogram(\"diff_nonabs\", diff_image_nonabs)\n diff_image = tf.abs(diff_image_nonabs)\n diff_reduced = tf.reduce_mean(diff_image, axis=0, keep_dims=True)\n diff_nonabs_reduced = tf.reduce_mean(diff_image_nonabs, axis=0, keep_dims=True)\n diff_max_reduced = tf.reduce_max(diff_image, axis=0, keep_dims=True)\n \n tf.summary.scalar(\"diff_max\", tf.reduce_max(diff_image))\n mean_diff = tf.reduce_mean(diff_image)\n tf.summary.scalar(\"diff_mean\", mean_diff)\n tf.summary.image(\"diff\", tf.minimum(diff_image, 1.5*mean_diff), max_outputs=3)\n tf.summary.image(\"thresh_diff\", tf.minimum(tf.abs(diff_image),100.0), max_outputs=3)\n tf.summary.image(\"thresh_input\", tf.minimum(input_restored, 2000.0), max_outputs=3)\n tf.summary.image(\"thresh_output\", tf.minimum(output_restored, 2000.0), max_outputs=3)\n tf.summary.image(\"diff_mean_delta5\", tf.minimum(diff_image,5.0), max_outputs=3)\n \n \n diff_gt_nonabs = input_restored - mean_gt\n tf.summary.histogram(\"diff_gt_nonabs\", diff_gt_nonabs)\n diff_gt = tf.abs(diff_gt_nonabs)\n 
diff_gt_reduced = tf.reduce_mean(diff_gt, axis=0, keep_dims=True)\n    diff_gt_nonabs_reduced = tf.reduce_mean(diff_gt_nonabs, axis=0, keep_dims=True)\n    diff_gt_max_reduced = tf.reduce_max(diff_gt, axis=0, keep_dims=True)\n    tf.summary.scalar(\"diff_gt_max\", tf.reduce_max(diff_gt))\n    reduce_mean_gt = tf.reduce_mean(diff_gt)\n    tf.summary.scalar(\"diff_gt_mean\", reduce_mean_gt)\n    tf.summary.image(\"diff_gt\", tf.minimum(diff_gt, 1.5*reduce_mean_gt), max_outputs=3)\n    tf.summary.image(\"thresh_diff_gt\", tf.minimum(tf.abs(diff_gt),100.0), max_outputs=3)\n    tf.summary.image(\"diff_gt_mean_delta5\", tf.minimum(diff_gt,5.0), max_outputs=3)\n    \n    \n    with tf.name_scope(\"accuracy\"):\n        accuracy = tf.reduce_sum(tf.cast(tf.less_equal(diff_image,5.0), tf.float32))/(HEIGHT*WIDTH*BATCH_SIZE)\n        tf.summary.scalar(\"accuracy\", accuracy)\n        accuracy_gt = tf.reduce_sum(tf.cast(tf.less_equal(diff_gt,5.0), tf.float32))/(HEIGHT*WIDTH*BATCH_SIZE)\n        tf.summary.scalar(\"accuracy_gt\", accuracy_gt)\n    \n    #tf.summary.image(\"thresh_output\", tf.minimum(decoded, tf.constant(100.0, dtype=tf.float32, shape=[BATCH_SIZE,390,390,1])), max_outputs=3)\n    \n    ################################################################################\n    #LOSS\n    sq_diff = tf.squared_difference(y_image, decoded)*loss_weights\n    sq_diff_bias = tf.squared_difference(y_image_bias, decoded_bias)\n    tf.summary.histogram(\"square_diff\", sq_diff)\n    tf.summary.scalar(\"bias\", tf.reduce_mean((decoded_bias-y_image_bias)*std_image))\n    #tf.summary.scalar(\"diff_bias\", (decoded_bias-y_image_bias)*std_image + mean_image)\n    tf.summary.image(\"square_diff\", sq_diff)\n    with tf.name_scope(\"loss\"):\n        loss = tf.reduce_mean(sq_diff) + sq_diff_bias*lmbda\n        tf.summary.scalar(\"l2\", loss)\n        tf.summary.scalar(\"loss_fine\", tf.reduce_mean(sq_diff))\n        tf.summary.scalar(\"loss_coarse\", sq_diff_bias)\n    \n    ###################################################################################\n    \n    with tf.name_scope(\"store_loss\"):\n        cum_loss = tf.Variable(tf.zeros_like(loss), trainable=False)\n        zero_loss = cum_loss.assign(tf.zeros_like(loss))\n        accum_loss = cum_loss.assign_add(loss/n_batches)\n        tf.summary.scalar(\"cum_loss\", cum_loss)\n    \n    with tf.name_scope(\"store_accuracy\"):\n        cum_acc = tf.Variable(tf.zeros_like(accuracy), trainable=False)\n        zero_acc = cum_acc.assign(tf.zeros_like(accuracy))\n        accum_acc = cum_acc.assign_add(accuracy/n_batches)\n        tf.summary.scalar(\"cum_acc\", cum_acc)\n    \n    with tf.name_scope(\"store_difference\"):\n        cum_diff = tf.Variable(tf.zeros_like(diff_reduced), trainable=False)\n        zero_diff = cum_diff.assign(tf.zeros_like(diff_reduced))\n        accum_diff = cum_diff.assign_add(diff_reduced/n_batches)\n        tf.summary.image(\"cum_diff\", cum_diff)\n        tf.summary.image(\"cum_diff_delta5\", tf.minimum(cum_diff,5.0))\n    \n    with tf.name_scope(\"store_difference_nonabs\"):\n        cum_diff_nonabs = tf.Variable(tf.zeros_like(diff_nonabs_reduced), trainable=False)\n        zero_diff_nonabs = cum_diff_nonabs.assign(tf.zeros_like(diff_nonabs_reduced))\n        accum_diff_nonabs = cum_diff_nonabs.assign_add(diff_nonabs_reduced/n_batches)\n        tf.summary.image(\"cum_diff_nonabs\", cum_diff_nonabs)\n        tf.summary.image(\"cum_diff_nonabs_delta5\", tf.minimum(cum_diff_nonabs,5.0))\n        tf.summary.scalar(\"cum_diff_nonabs_max\", tf.reduce_max(cum_diff_nonabs))\n        tf.summary.scalar(\"cum_diff_nonabs_min\", tf.reduce_min(cum_diff_nonabs))\n        tf.summary.scalar(\"cum_diff_nonabs_mean\", tf.reduce_mean(cum_diff_nonabs))\n\n\n    with tf.name_scope(\"store_difference_max\"):\n        cum_diff_max = tf.Variable(tf.zeros_like(diff_max_reduced), trainable=False)\n        zero_diff_max = cum_diff_max.assign(tf.zeros_like(diff_max_reduced))\n        accum_diff_max = cum_diff_max.assign(tf.maximum(cum_diff_max, diff_max_reduced))\n        tf.summary.image(\"cum_diff_max\", cum_diff_max)\n        tf.summary.image(\"cum_diff_delta5_max\", tf.minimum(cum_diff_max,5.0))\n    \n    ##########GT\n    \n    sq_diff_gt = tf.squared_difference(y_image, (mean_gt - mean_image)/std_image)\n    sq_diff_gt_bias = tf.squared_difference(y_image_bias, mean_gt_bias)\n    tf.summary.scalar(\"bias_gt\", tf.reduce_mean((mean_gt_bias-y_image_bias)*std_image))\n    #tf.summary.scalar(\"diff_bias_gt\", (mean_gt_bias-y_image_bias)*std_image + mean_image)\n    tf.summary.histogram(\"square_diff_gt\", sq_diff_gt)\n    tf.summary.image(\"square_diff_gt\", sq_diff_gt)\n    with tf.name_scope(\"loss_gt\"):\n        loss_gt = tf.reduce_mean(sq_diff_gt) + sq_diff_gt_bias*lmbda\n        tf.summary.scalar(\"l2_gt\", loss_gt)\n        tf.summary.scalar(\"lossgt_fine\", tf.reduce_mean(sq_diff_gt))\n        tf.summary.scalar(\"lossgt_coarse\", sq_diff_gt_bias)\n    \n    with tf.name_scope(\"store_loss_gt\"):\n        cum_loss_gt = tf.Variable(tf.zeros_like(loss_gt), trainable=False)\n        zero_loss_gt = cum_loss_gt.assign(tf.zeros_like(loss_gt))\n        accum_loss_gt = cum_loss_gt.assign_add(loss_gt/n_batches)\n        tf.summary.scalar(\"cum_loss_gt\", cum_loss_gt)\n    \n    with tf.name_scope(\"store_accuracy_gt\"):\n        cum_acc_gt = tf.Variable(tf.zeros_like(accuracy_gt), trainable=False)\n        zero_acc_gt = cum_acc_gt.assign(tf.zeros_like(accuracy_gt))\n        accum_acc_gt = cum_acc_gt.assign_add(accuracy_gt/n_batches)\n        tf.summary.scalar(\"cum_acc_gt\", cum_acc_gt)\n    \n    with tf.name_scope(\"store_difference_gt\"):\n        cum_diff_gt = tf.Variable(tf.zeros_like(diff_gt_reduced), trainable=False)\n        zero_diff_gt = cum_diff_gt.assign(tf.zeros_like(diff_gt_reduced))\n        accum_diff_gt = cum_diff_gt.assign_add(diff_gt_reduced/n_batches)\n        tf.summary.image(\"cum_diff_gt\", cum_diff_gt)\n        tf.summary.image(\"cum_diff_gt_delta5\", tf.minimum(cum_diff_gt,5.0))\n\n    with tf.name_scope(\"store_difference_nonabs_gt\"):\n        cum_diff_gt_nonabs = tf.Variable(tf.zeros_like(diff_gt_nonabs_reduced), trainable=False)\n        zero_diff_gt_nonabs = cum_diff_gt_nonabs.assign(tf.zeros_like(diff_gt_nonabs_reduced))\n        accum_diff_gt_nonabs = cum_diff_gt_nonabs.assign_add(diff_gt_nonabs_reduced/n_batches)\n        tf.summary.image(\"cum_diff_gt_nonabs\", cum_diff_gt_nonabs)\n        tf.summary.image(\"cum_diff_gt_nonabs_delta5\", tf.minimum(cum_diff_gt_nonabs,5.0))\n        tf.summary.scalar(\"cum_diff_gt_nonabs_max\", tf.reduce_max(cum_diff_gt_nonabs))\n        tf.summary.scalar(\"cum_diff_gt_nonabs_min\", tf.reduce_min(cum_diff_gt_nonabs))\n        tf.summary.scalar(\"cum_diff_gt_nonabs_mean\", tf.reduce_mean(cum_diff_gt_nonabs))\n    \n\n    with tf.name_scope(\"store_difference_max_gt\"):\n        cum_diff_gt_max = tf.Variable(tf.zeros_like(diff_gt_max_reduced), trainable=False)\n        zero_diff_gt_max = cum_diff_gt_max.assign(tf.zeros_like(diff_gt_max_reduced))\n        accum_diff_gt_max = cum_diff_gt_max.assign(tf.maximum(cum_diff_gt_max, diff_gt_max_reduced))\n        tf.summary.image(\"cum_diff_gt_max\", cum_diff_gt_max)\n        tf.summary.image(\"cum_diff_gt_delta5_max\", tf.minimum(cum_diff_gt_max,5.0))\n\n    with tf.name_scope(\"cum_dpo_over_photons_in\"):\n        cum_dpo = tf.Variable(tf.zeros_like(dp_sum), trainable=False)\n        zero_cum_dpo = cum_dpo.assign(tf.zeros_like(dp_sum))\n        accum_cum_dpo = cum_dpo.assign_add(dp_sum/(photon_sum_in*n_batches))\n        tf.summary.scalar(\"cum_dpo_over_photons_in\", cum_dpo)\n\n    with tf.name_scope(\"cum_dpo_over_photons_in_gt\"):\n        cum_dpo_gt = 
tf.Variable(tf.zeros_like(dp_sum_gt), trainable=False)\n zero_cum_dpo_gt = cum_dpo_gt.assign(tf.zeros_like(dp_sum))\n accum_cum_dpo_gt = cum_dpo_gt.assign_add(dp_sum_gt/(photon_sum_in*n_batches))\n tf.summary.scalar(\"cum_dpo_over_photons_in_gt\", cum_dpo_gt)\n\n with tf.name_scope(\"cum_dpo_over_photons_in_fit1\"):\n cum_dpo_fit1 = tf.Variable(tf.zeros_like(dp_sum_fit1), trainable=False)\n zero_cum_dpo_fit1 = cum_dpo_fit1.assign(tf.zeros_like(dp_sum))\n accum_cum_dpo_fit1 = cum_dpo_fit1.assign_add(dp_sum_fit1/(photon_sum_in*n_batches))\n tf.summary.scalar(\"cum_dpo_over_photons_in_fit1\", cum_dpo_fit1)\n\n \n #################################################################################\n \n #Create global step variable and scaled learning rate\n\n global_step = tf.Variable(1, trainable=False, name='global_step')\n scaling = tf.Variable(1, trainable=False, name='scaling', dtype=tf.float32)\n update_scaling = scaling.assign(tf.multiply(scaling,DECAY))\n learning_rate_scaled = scaling*learning_rate\n tf.summary.scalar(\"learning_rate\", learning_rate_scaled)\n \n #OPTIMIZER\n opt = tf.train.AdamOptimizer(learning_rate=learning_rate_scaled, beta1=beta1, beta2=beta2, epsilon=epsilon)\n \n ## Retrieve all trainable variables\n tvs = tf.trainable_variables()\n \n #Variable to store partition gradients in\n with tf.device(\"/cpu:0\"):\n with tf.name_scope(\"store_gradient\"):\n accum_vars = [tf.Variable(tf.zeros_like(tv.initialized_value()), trainable=False) for tv in tvs]\n zero_ops = [tv.assign(tf.zeros_like(tv)) for tv in accum_vars]\n \n #Compute gradients on partition\n gvs = opt.compute_gradients(loss, tvs)\n \n #Add gradients on partition to total gradient\n with tf.device(\"/cpu:0\"):\n accum_ops = [accum_vars[i].assign_add(gv[0]/n_batches) for i, gv in enumerate(gvs)]\n #Create summaries using gradients\n for gv in gvs:\n tf.summary.histogram((gv[1].name + \"/gradient\").replace(\":\",\"_\"), gv[0])\n #Apply total gradient\n with tf.device(\"/cpu:0\"):\n train_step = opt.apply_gradients(grads_and_vars=[(accum_vars[i], gv[1]) for i, gv in enumerate(gvs)], global_step=global_step)\n \n summary = tf.summary.merge_all()\n saver = tf.train.Saver(max_to_keep=2)\n sess.run(tf.global_variables_initializer())\n \n #Load previous checkpoints if possible\n lr, b1, b2, eps, lmb = sess.run([learning_rate,beta1,beta2,epsilon,lmbda])\n sub_logdir = str(sys.argv[1])\n if sub_logdir == \"0\":\n sub_logdir=LOGDIR + make_param_string(lr,b1,b2,eps,lmb,n_batches)\n print(sub_logdir)\n else:\n sub_logdir = LOGDIR + sub_logdir\n #Load previous checkpoints if possible\n checkpoint = tf.train.get_checkpoint_state(sub_logdir)\n if checkpoint and checkpoint.model_checkpoint_path:\n print(checkpoint.model_checkpoint_path)\n saver.restore(sess, tf.train.latest_checkpoint(sub_logdir))\n \n train_writer = tf.summary.FileWriter(sub_logdir + \"/train\")\n eval_writer = tf.summary.FileWriter(sub_logdir+\"/eval\")\n train_writer.add_graph(sess.graph)\n \n \n \n sess.graph.finalize()\n \n #Perform actual training steps on the created graph\n start = time.time()\n start_step = sess.run(global_step)\n print(\"epoch\\t\\tseconds\\t\\tex/sec\\t\\tloss\\taccuracy\\tmean_diff\")\n h5data = np.zeros((BATCH_SIZE,HEIGHT,WIDTH), dtype=np.float32)\n observed_loss=1E22 #Large initial loss to not apply scaling immediately\n observe_step=start_step #Step at which the most recent lowest loss was observed\n i = start_step-1\n while i < start_step + TRAINING_STEPS:\n print(i)\n i=i+1\n #Reset accumulators\n sess.run([zero_ops, 
zero_diff, zero_loss, zero_acc, zero_diff_max, zero_diff_gt, zero_loss_gt, \n zero_acc_gt, zero_diff_gt_max, zero_diff_nonabs, zero_diff_gt_nonabs,zero_dp_sum, zero_dp_sum_gt, zero_dp_sum_fit1,\n zero_cum_dpo, zero_cum_dpo_fit1, zero_cum_dpo_gt])\n\n # Accumulate the gradients and other accumulators\n for j in range(0,N_BATCHES):\n for k in range(0,BATCH_SIZE):\n index = ((i-1)*BATCH_SIZE*N_BATCHES+j*BATCH_SIZE+k)%EPOCH\n if index == EPOCH - 1:\n rand.shuffle(train_indices)\n k_index = np.argwhere(indices == train_indices[index])\n h5data[k,:,:] = np.array(h5dataset[train_indices[index],:,:], dtype=np.float32)\n sess.run([accum_ops, accum_diff, accum_loss, accum_acc, accum_diff_max, accum_diff_gt, \n accum_loss_gt, accum_acc_gt, accum_diff_gt_max, accum_diff_nonabs, accum_diff_gt_nonabs,\n accum_dp_sum, accum_dp_sum_gt, accum_dp_sum_fit1, accum_cum_dpo, accum_cum_dpo_fit1, accum_cum_dpo_gt],\n feed_dict={y: h5data, sample_k: k_load[k_index,0,0], n_batches: np.reshape(np.array(N_BATCHES, dtype=np.int32), ())})\n\n \n #First summary\n if i == 1:\n s, l, a, md = sess.run([summary, loss, accuracy, mean_diff], feed_dict={y: h5data, sample_k: k_load[k_index,0,0], n_batches: np.reshape(np.array(N_BATCHES, dtype=np.int32), ())})\n train_writer.add_summary(s,i/SUMMARY_STEPS)\n print(\"%.2f\\t\\t-\\t\\t-\\t\\t%.2E\\t%.2f\\t%.2f\" % (float(i)/float(EPOCH_STEPS), l, a, md))\n observed_loss = l\n observe_step = 1\n # Run the train_step ops to update the weights based on your accumulated gradients\n print(\"before train\")\n sess.run(train_step, feed_dict={y: h5data, sample_k: k_load[k_index,0,0], n_batches: np.reshape(np.array(N_BATCHES, dtype=np.int32), ())})\n print(\"after train\")\n #Summary\n# if i % 1 == 0:\n if i % SUMMARY_STEPS == 0:\n print(\"A\")\n end = time.time()\n previous_start = start\n start = time.time()\n s, l, a, md = sess.run([summary, cum_loss, cum_acc, mean_diff], feed_dict={y: h5data, sample_k: k_load[k_index,0,0], n_batches: np.reshape(np.array(N_BATCHES, dtype=np.int32), ())})\n print(\"B\")\n train_writer.add_summary(s,i/SUMMARY_STEPS)\n print(\"C\")\n print(\"%.2f\\t\\t%.2f\\t\\t%.2f\\t\\t%.2E\\t%.2f\\t%.2f\" % (float(i)/float(EPOCH_STEPS), end-previous_start, (SUMMARY_STEPS*BATCH_SIZE*N_BATCHES)/(end-previous_start), l, a, md))\n \n #Scale learning rate if loss is not decreasing\n if l < observed_loss:\n observed_loss = l\n observe_step = i\n elif (l >= observed_loss) and (i - observe_step > DECAY_STEPS):\n sess.run(update_scaling)\n observed_loss = l\n observe_step = i\n\n #Save checkpoint of variables\n if i % CKPT_STEPS == 0:\n saver.save(sess, sub_logdir + \"model.ckpt\", global_step=i, write_meta_graph=False)\n print(\"Checkpoint saved\")\n sess.run([zero_ops, zero_diff, zero_loss, zero_acc, zero_diff_max, zero_diff_gt, zero_loss_gt, \n zero_acc_gt, zero_diff_gt_max, zero_diff_nonabs, zero_diff_gt_nonabs,zero_dp_sum, zero_dp_sum_gt, zero_dp_sum_fit1,\n zero_cum_dpo, zero_cum_dpo_fit1, zero_cum_dpo_gt])\n \n j = 0\n while j < len(validation_indices):\n k_index = np.argwhere(indices == validation_indices[j])\n sess.run([accum_ops, accum_diff, accum_loss, accum_acc, accum_diff_max, accum_diff_gt, \n accum_loss_gt, accum_acc_gt, accum_diff_gt_max, accum_diff_nonabs, accum_diff_gt_nonabs, accum_dp_sum_fit1,\n accum_cum_dpo, accum_cum_dpo_fit1, accum_cum_dpo_gt], \n feed_dict={y: np.reshape(h5dataset[validation_indices[j],:,:],(1,HEIGHT,WIDTH)), sample_k: k_load[k_index,0,0], n_batches: len(validation_indices)})\n j = j + 1\n s = sess.run(summary, feed_dict={y: 
np.reshape(h5dataset[validation_indices[j-1],:,:],(1,HEIGHT,WIDTH)), sample_k: k_load[k_index,0,0], n_batches: len(validation_indices)})\n            eval_writer.add_summary(s,i/SUMMARY_STEPS)\n\n    \n    \n    eval_writer.close()\n    train_writer.close()\n    \n    \n#Creates concise names of directories\ndef make_param_string(learning_rate, beta1, beta2, epsilon, lmbda, batch_size):\n    return \"lr_%.1E_b1_%.1E_b2_%.1E_eps_%.1E_lmbda_%.1E_bsize_%i/\" % (learning_rate, beta1, beta2, epsilon, lmbda, batch_size)\n\n\n#Locally connected layer with no up or downsampling\ndef loc_layer(inputs=None, kernel=3, n_channels=-1, name=\"\", activation=True):\n    #Input has shape batch_size*x*y*channels\n    shape = inputs.get_shape()\n    height = shape[1].value\n    width = shape[2].value\n    channels = shape[3].value #Number of input channels\n    if n_channels == -1:\n        n_channels = channels #Number of output channels\n    \n\n    sz_local = kernel # kernel size\n    sz_patch = (sz_local**2)*channels\n    \n    # Extract kernel-sized (default 3x3) tensor patches at every pixel\n    patches1 = tf.extract_image_patches(inputs, ksizes=[1,sz_local,sz_local,1],\n                                        strides=[1,1,1,1],\n                                        rates=[1,1,1,1],\n                                        padding=\"SAME\")\n#with tf.device(\"/cpu:0\"):\n    weights1 = tf.get_variable(\"W1_\"+name, [1,height,width,sz_patch, n_channels], \n                               initializer=tf.truncated_normal_initializer(stddev=np.sqrt(1.0/sz_patch), dtype=tf.float32))\n    tf.summary.histogram(\"W1_\"+name+\"_0\", weights1)\n#with tf.device(\"/cpu:0\"):\n    biases1 = tf.get_variable(\"b1_\"+name, [1,height,width,n_channels], initializer=tf.constant_initializer(0))\n    tf.summary.histogram(\"b1_\"+name+\"_0\", biases1)\n\n    # \"Filter\" each patch with its own kernel \n    mul1 = tf.multiply(tf.expand_dims(patches1, axis=-1), weights1)\n    ssum1 = tf.reduce_sum(mul1, axis=3)\n    local1 = tf.add(ssum1, biases1)\n    if activation:\n        local1 = tf.nn.selu(local1)\n    \n    return local1\n\n#Locally connected layer with downsampling\ndef loc_downsample(inputs=None, kernel=2, n_channels=-1, name=\"\", activation=True):\n    #Input has shape batch_size*x*y*channels\n    shape = inputs.get_shape()\n    \n    channels = shape[3].value\n    if n_channels == -1:\n        n_channels = channels\n\n    sz_local = kernel # kernel size\n    sz_patch = (sz_local**2)*channels\n    height = shape[1].value//sz_local\n    width = shape[2].value//sz_local\n    \n\n    # Extract non-overlapping kernel-sized patches (stride = kernel size)\n    patches1 = tf.extract_image_patches(inputs, ksizes=[1,sz_local,sz_local,1],\n                                        strides=[1,sz_local,sz_local,1],\n                                        rates=[1,1,1,1],\n                                        padding=\"SAME\")\n#with tf.device(\"/cpu:0\"):\n    weights1 = tf.get_variable(\"W1_\"+name, [1,height,width,sz_patch, n_channels], initializer=tf.truncated_normal_initializer(stddev=np.sqrt(1.0/sz_patch), dtype=tf.float32))\n    tf.summary.histogram(\"W1_\"+name+\"_0\", weights1)\n#with tf.device(\"/cpu:0\"):\n    biases1 = tf.get_variable(\"b1_\"+name, [1,height,width,n_channels], initializer=tf.constant_initializer(0))\n    tf.summary.histogram(\"b1_\"+name+\"_0\", biases1)\n\n    # \"Filter\" each patch with its own kernel \n    mul1 = tf.multiply(tf.expand_dims(patches1, axis=-1), weights1)\n    ssum1 = tf.reduce_sum(mul1, axis=3)\n    local1 = tf.add(ssum1, biases1)\n    if activation:\n        local1 = tf.nn.selu(local1)\n\n    return local1\n\n#Locally connected layer with upsampling\ndef loc_upsample(inputs=None, kernel=2, n_channels=-1, name=\"\", activation=True):\n    #Input has shape batch_size*x*y*channels\n    shape = inputs.get_shape()\n    \n    channels = shape[3].value\n    if n_channels == -1:\n        n_channels = channels\n\n    sz_local = kernel # kernel size\n    sz_patch = channels\n    height = shape[1].value*sz_local\n    width = shape[2].value*sz_local\n    \n    upsample1 = 
tf.image.resize_images(inputs, size=(height,width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n # Extract 3x3 tensor patches\n#with tf.device(\"/cpu:0\"):\n weights1 = tf.get_variable(\"W1_\"+name, [1,height,width,channels,n_channels], initializer=tf.truncated_normal_initializer(stddev=np.sqrt(1.0/sz_patch), dtype=tf.float32))\n tf.summary.histogram(\"W1_\"+name+\"_0\", weights1)\n#with tf.device(\"/cpu:0\"):\n biases1 = tf.get_variable(\"b1_\"+name, [1,height,width,n_channels], initializer=tf.constant_initializer(0))\n tf.summary.histogram(\"b1_\"+name+\"_0\", biases1)\n\n # \"Filter\" each patch with its own kernel \n mul1 = tf.multiply(tf.expand_dims(upsample1, axis=-1), weights1)\n ssum1 = tf.reduce_sum(mul1, axis=3)\n local1 = tf.add(ssum1, biases1)\n if activation:\n local1 = tf.nn.selu(local1)\n return local1\n\n#Convolutional layer\ndef conv_layer(inputs=None, filters=10, kernel=3, strides=1, name=\"conv\", activation=False):\n #g = tf.get_default_graph()\n if activation:\n inputs = tf.layers.batch_normalization(inputs=inputs, axis=3)\n inputs = tf.nn.leaky_relu(inputs)\n conv = tf.layers.conv2d(\n inputs=inputs,\n filters=filters,\n strides=strides,\n kernel_size=kernel,\n padding=\"same\",\n name=name,\n activation=None,\n kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN', uniform=True),#tf.truncated_normal_initializer()#tf.contrib.layers.xavier_initializer(),\n bias_initializer=tf.constant_initializer(0))\n #conv_kernel = g.get_tensor_by_name(name + \"/kernel:0\")\n #conv_bias = g.get_tensor_by_name(name + '/bias:0')\n #tf.summary.histogram(name+\"/kernel_0/value\", conv_kernel)\n #tf.summary.histogram(name+\"/bias_0/value\", conv_bias)\n #tf.summary.histogram(name+\"/activations\",conv)\n return conv\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"archAB.py","file_name":"archAB.py","file_ext":"py","file_size_in_byte":45133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"428062260","text":"# 入力\nN = int(input())\nA = list(map(int,input().split()))\n\nP = 10**9+7\nbit = [0 for i in range(60)]\n\n# 各bit独立に1が何個あるか数える\nfor a in A:\n S = bin(a)\n for j in range(1,len(S)-1):\n if S[-1*j] == '1':\n bit[j-1] += 1\n\n# 各bitの1の個数と0の個数の積が、XOR和が1になる数字の個数と一致\nans = 0\nfor i,a in enumerate(bit):\n ans += a*(N-a)*(2**i)\n ans %= P\n\nprint(ans)\n","sub_path":"ABC/ABC_147/abc147d.py","file_name":"abc147d.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"453276351","text":"def zeru_up_line():\n global a,n,m\n for i in range (n):\n for k in range (m):\n if k > i:\n a[i][k]=0\n\nn = int(input(\"Введите количество строк: \"))\nm = int(input(\"Введите количество столбцов: \"))\nfrom random import randint\na = [[randint(-20, 20) for i in range (n)] for i in range (m)]\nprint()\nprint(\"Ваша сгенерированная матрица: \")\nfor row in a:\n print(\" \".join(str(elem) for elem in row))\nprint()\ndel randint\n\nzeru_up_line()\nfor row in a:\n print(\" \".join(str(elem) for elem in row))\nprint()\n\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"468032863","text":"from telegram.ext import Updater, CommandHandler\nfrom config import TELEGRAM_TOKEN, APP_PORT, APP_HOST, TRANSPORTERS_CONFIG, JOB_INTERVAL\nfrom transporter import Transporter9911, 
Transporter618\nfrom utils import build_request\nfrom models import User, RequestStatus, Request\nfrom db import DB, UserSQL, RequestSQL\nimport logging\n\ntransporters = {\n '9911.by': Transporter9911(TRANSPORTERS_CONFIG['9911.by']),\n '618.by': Transporter618(TRANSPORTERS_CONFIG['618.by'])\n}\n\nlogger = logging.getLogger(__name__)\n\n\ndef init_job_queue(job_queue):\n requests = DB.execute(RequestSQL.get_active_requests)\n logger.info(f'Add next requests to job queue during initialization: {[r[\"request_id\"] for r in requests]}')\n\n for data in requests:\n request = Request.from_dict(data)\n job_queue.run_repeating(process_request, interval=JOB_INTERVAL, first=0, context=request, name=request.id)\n\n\ndef process_request(context):\n request = context.job.context\n\n transporter_site = TRANSPORTERS_CONFIG[request.transporter]['site']\n tickets = transporters[request.transporter].find_ticket(request)\n\n message = f'Following tickets are available for your request *{request.id}*. To order them go to {transporter_site}:'\n\n if tickets:\n context.bot.send_message(chat_id=request.user_id, text=message, parse_mode='Markdown')\n for ticket in tickets:\n context.bot.send_message(chat_id=request.user_id, text=f'{ticket}')\n\n\ndef request_trip(update, context):\n user = User.from_dict(update.message.from_user)\n DB.execute(UserSQL.save, user.asdict())\n logger.info(f'Starting process find ticket request from {user.username} user: {context.args}')\n\n request = build_request(user.id, context.args)\n request.id = DB.execute(RequestSQL.create, request.asdict())[0][0]\n\n message = f'Your request *{request.id}* is processing...'\n context.bot.send_message(user.id, text=message, parse_mode='Markdown')\n context.job_queue.run_repeating(process_request, interval=JOB_INTERVAL, first=0, context=request, name=request.id)\n\n DB.execute(RequestSQL.update_status_by_id, [RequestStatus.in_progress.value, request.id])\n logger.info(f'Request {request.id} from {user.username} user was successfully added to job queue')\n\n\ndef remove_request(update, context):\n request_id = context.args[0]\n logger.info(f'Starting process of removing {request_id} request id by {update.message.from_user[\"username\"]} user')\n\n jobs = context.job_queue.get_jobs_by_name(int(request_id))\n message = f'Your request *{request_id}* was successfully closed'\n if jobs:\n for job in jobs:\n job.schedule_removal()\n DB.execute(RequestSQL.close_by_id, [request_id])\n else:\n message = f'Unfortunately request *{request_id}* doesn\\'t exist'\n context.bot.send_message(update.effective_chat.id, text=message, parse_mode='Markdown')\n\n\ndef main():\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n updater = Updater(token=TELEGRAM_TOKEN, use_context=True)\n dispatcher = updater.dispatcher\n\n init_job_queue(dispatcher.job_queue)\n\n request_handler = CommandHandler('request', request_trip)\n stop_handler = CommandHandler('stop', remove_request)\n logger.info(f'All handlers was successfully created')\n\n dispatcher.add_handler(request_handler)\n dispatcher.add_handler(stop_handler)\n logger.info(f'All handlers was successfully added to dispatcher')\n\n updater.start_webhook(listen=\"0.0.0.0\", port=int(APP_PORT), url_path=TELEGRAM_TOKEN)\n updater.bot.setWebhook(f'{APP_HOST}{TELEGRAM_TOKEN}')\n logger.info(f'Webhook was started')\n\n updater.idle()\n\n\nif __name__ == '__main__':\n 
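# (editor's note) entry point: main() registers the /request and /stop CommandHandlers\n    # defined above and serves Telegram updates via webhook (start_webhook + setWebhook)\n    # rather than long polling.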
main()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"569139837","text":"from django.db import models\n\nfrom mps.base.models import LockableModel\nfrom microdevices.models import OrganModel\nfrom cellsamples.models import Organ\nfrom diseases.models import Disease\n\nfrom assays.models import PhysicalUnits\n\nfrom django.core.exceptions import ValidationError\n\n\nclass Species(LockableModel):\n \"\"\"A Species defines a particular species\"\"\"\n class Meta(object):\n verbose_name_plural = 'Species'\n ordering = ('species_name', )\n\n species_name = models.CharField(\n max_length=40,\n unique=True,\n verbose_name='Species'\n )\n\n def __str__(self):\n return self.species_name\n\n\nPARTICIPANTTYPES = (\n ('I', 'Individual'),\n ('P', 'Population'),\n)\n\nTRIALSUBTYPES = (\n ('C', 'Case Report'),\n ('P', 'Population Report'),\n ('U', 'Unknown / Unspecified'),\n)\n\n# Participants information is now part of DrugTrials model\n# instead of a seperate entity\n\n\nclass TrialSource(LockableModel):\n \"\"\"A Trial Source indicates where a trial came from and provides some information\"\"\"\n class Meta(object):\n ordering = ('source_name', )\n source_name = models.CharField(max_length=40, unique=True)\n source_website = models.URLField(blank=True, null=True)\n description = models.CharField(max_length=400, blank=True, default='')\n\n def __str__(self):\n return self.source_name\n\n\nTRIALTYPES = (\n ('S', 'Microphysiology'),\n ('P', 'Preclinical'),\n ('C', 'Clinical'),\n ('M', 'Post-marketing'),\n ('B', 'Combined Clinical-Post Market'),\n ('U', 'Unknown / Unspecified'),\n)\n\n\nclass DrugTrial(LockableModel):\n \"\"\"A Drug Trial describes the participants and high-level data of a drug trial\"\"\"\n class Meta(object):\n verbose_name = 'Drug Trial'\n ordering = ('compound', 'species', )\n\n # title = models.CharField(max_length=2000, unique=True)\n title = models.CharField(max_length=2000)\n condition = models.CharField(max_length=1400, blank=True, default='')\n source = models.ForeignKey(TrialSource, on_delete=models.CASCADE)\n compound = models.ForeignKey('compounds.Compound', blank=True, null=True, on_delete=models.CASCADE)\n\n # Figures\n figure1 = models.ImageField(upload_to='figures', null=True, blank=True)\n figure2 = models.ImageField(upload_to='figures', null=True, blank=True)\n\n # Participant Information\n\n species = models.ForeignKey(\n Species,\n default='1',\n blank=True,\n null=True,\n on_delete=models.CASCADE\n )\n\n gender = models.CharField(max_length=1,\n choices=(\n ('F', 'female'), ('M', 'male'),\n ('X', 'mixed'),\n ('U', 'unknown or unspecified'),\n ),\n default='U',\n blank=True)\n\n population_size = models.CharField(max_length=50,\n default='1',\n blank=True)\n\n age_average = models.FloatField(blank=True, null=True)\n\n age_max = models.FloatField(blank=True, null=True)\n\n age_min = models.FloatField(blank=True, null=True)\n\n age_unit = models.CharField(max_length=1,\n blank=True,\n choices=(\n ('M', 'months'), ('Y', 'years')\n ),\n default='Y')\n\n weight_average = models.FloatField(blank=True, null=True)\n weight_max = models.FloatField(blank=True, null=True)\n weight_min = models.FloatField(blank=True, null=True)\n weight_unit = models.CharField(max_length=1, blank=True,\n choices=(\n ('K', 'kilograms'), ('L', 'pounds'),\n ),\n default='L')\n\n # End of Participant Information\n\n disease = models.ManyToManyField(Disease, 
blank=True)\n trial_type = models.CharField(max_length=1, choices=TRIALTYPES)\n trial_sub_type = models.CharField(max_length=1,\n choices=TRIALSUBTYPES, default='C')\n start_date = models.DateField(blank=True, null=True)\n end_date = models.DateField(blank=True, null=True)\n publish_date = models.DateField(blank=True, null=True)\n description = models.CharField(max_length=1400, blank=True, default='')\n source_link = models.URLField(blank=True, null=True)\n references = models.CharField(max_length=400, default='',\n verbose_name='Trial ID/Reference')\n\n def __str__(self):\n return '{} from {}'.format(dict(TRIALTYPES)[self.trial_type],\n self.source.source_name)\n\n def get_absolute_url(self):\n return \"/drugtrials/{}/\".format(self.id)\n\n\n# DEPRECATED AND SUBJECT TO REMOVAL\nclass TestType(LockableModel):\n \"\"\"A Test Type describes what sort of test was performed\n\n THIS MODEL HAS BEEN DEPRECATED IN FAVOR OF FINDINGTYPE\n \"\"\"\n class Meta(object):\n ordering = ('test_type',)\n test_type = models.CharField(max_length=60, unique=True)\n description = models.CharField(max_length=200, blank=True, default='')\n\n def __str__(self):\n return self.test_type\n\n\n# DEPRECATED AND SUBJECT TO REMOVAL\n# The only difference between Test and Finding is the ability to select and organ model\n# However, it was decided to use DrugTrials for EXTERNAL data only\nclass Test(LockableModel):\n \"\"\"A Test describes and instance of a test performed\n\n THIS MODEL HAS BEEN DEPRECATED IN FAVOR OF FINDING\n \"\"\"\n class Meta(object):\n unique_together = [('test_type', 'test_name')]\n ordering = ('test_name', 'organ', 'test_type', )\n\n organ_model = models.ForeignKey(\n OrganModel,\n blank=True,\n null=True,\n on_delete=models.CASCADE\n )\n test_type = models.ForeignKey(TestType, on_delete=models.CASCADE)\n test_name = models.CharField(max_length=40,\n verbose_name='Organ Function Test')\n test_unit = models.CharField(max_length=40, blank=True, default='')\n organ = models.ForeignKey(Organ, blank=True, null=True, on_delete=models.CASCADE)\n description = models.CharField(max_length=400, blank=True, default='')\n\n def __str__(self):\n return '{} :: {} :: {}'.format(\n self.organ,\n self.test_type,\n self.test_name\n )\n\n\nclass FindingType(LockableModel):\n \"\"\"Finding Type describes a type of finding (e.g. biopsy)\"\"\"\n class Meta(object):\n ordering = ('finding_type', )\n\n finding_type = models.CharField(max_length=100, unique=True)\n description = models.CharField(max_length=200, blank=True, default='')\n\n def __str__(self):\n return self.finding_type\n\n\nclass Finding(LockableModel):\n \"\"\"Finding describes a finding relative to what organ (e.g. liver biopsy)\"\"\"\n class Meta(object):\n unique_together = [('organ', 'finding_name')]\n ordering = ('organ', 'finding_type', 'finding_name', )\n\n finding_type = models.ForeignKey(FindingType, on_delete=models.CASCADE)\n finding_name = models.CharField(max_length=100)\n finding_unit = models.CharField(max_length=40, blank=True, default='')\n organ = models.ForeignKey(\n Organ,\n blank=True,\n null=True,\n on_delete=models.CASCADE\n )\n # Subject to removal?\n description = models.CharField(max_length=400, blank=True, default='')\n\n def __str__(self):\n return '{} :: {} :: {}'.format(self.organ, self.finding_type, self.finding_name)\n\n\nclass ResultDescriptor(LockableModel):\n \"\"\"A Result Descriptor adds detail to a Finding (e.g. 
increased, female only, etc.\"\"\"\n class Meta(object):\n ordering = ('result_descriptor', )\n result_descriptor = models.CharField(max_length=40, unique=True)\n\n def __str__(self):\n return self.result_descriptor\n\n\nSEVERITY_SCORE = (\n ('-1', 'UNKNOWN'), ('0', 'NEGATIVE'), ('1', '+'), ('2', '+ +'),\n ('3', '+ + +'), ('4', '+ + + +'), ('5', '+ + + + +')\n)\n\n# DEPRECATED\nTIME_UNITS = (\n ('u', 'unknown'), ('h', 'hours'), ('d', 'days'),\n ('w', 'weeks'), ('m', 'months'), ('y', 'years')\n)\n\nPOSNEG = (\n ('0', 'Neg'), ('1', 'Pos')\n)\n\nRESULT_TYPE = (\n ('B', 'Biopsy'), ('R', 'Report'), ('M', 'Mechanism'), ('I', 'Information')\n)\n\n\n# DEPRECATED AND SUBJECT TO REMOVAL\nclass TestResult(models.Model):\n \"\"\"A Test Result describes a specific discovery from a organ model test\n\n THIS MODEL IS DEPRECATED\n \"\"\"\n drug_trial = models.ForeignKey(DrugTrial, on_delete=models.CASCADE)\n\n test_name = models.ForeignKey(\n Test,\n verbose_name='Test',\n blank=True,\n null=True,\n on_delete=models.CASCADE\n )\n\n test_time = models.FloatField(verbose_name='Time', blank=True, null=True)\n\n time_units = models.ForeignKey(\n PhysicalUnits,\n blank=True,\n null=True,\n related_name='test_time_units',\n on_delete=models.CASCADE\n )\n\n result = models.CharField(default='1',\n max_length=8,\n choices=POSNEG,\n verbose_name='Pos/Neg?',\n blank=True)\n\n severity = models.CharField(default='-1',\n max_length=5,\n choices=SEVERITY_SCORE,\n verbose_name='Severity',\n blank=True)\n\n percent_min = models.FloatField(blank=True,\n null=True,\n verbose_name='Min Affected (% Population)')\n\n percent_max = models.FloatField(blank=True,\n null=True,\n verbose_name='Max Affected (% Population)')\n\n descriptor = models.ForeignKey(ResultDescriptor, blank=True, null=True, on_delete=models.CASCADE)\n\n value = models.FloatField(blank=True, null=True)\n\n value_units = models.ForeignKey(PhysicalUnits, blank=True, null=True, related_name='test_value_units', on_delete=models.CASCADE)\n\n def clean(self):\n \"\"\"Require units to be specified if a value is present\"\"\"\n\n if self.value or self.value_units:\n if not (self.drug_trial.source and self.drug_trial.source_link\n and self.value_units and self.value):\n raise ValidationError(\n \"Values and units must have a defined source \"\n \"and source link\"\n )\n\n if self.value:\n if not self.value_units:\n raise ValidationError(\n \"You must specify valid units \"\n \"for the value you entered\"\n )\n\n if not self.value:\n if self.value_units:\n raise ValidationError(\"You forgot to enter a value!\")\n\n def __str__(self):\n return ''\n\nFREQUENCIES = (\n ('>= 10%', '>= 10% : Very Common'), ('1 - < 10%', '1 - < 10% : Common'),\n ('0.1 - < 1%', '0.1 - < 1% : Uncommon'), ('0.01 - < 0.1%', '0.01 - < 0.1% : Rare'),\n ('< 0.01%', '< 0.01% : Very Rare')\n)\n\n\nclass FindingResult(models.Model):\n \"\"\"A Finding Result describes in detail a single finding from a Drug Trial\"\"\"\n\n class Meta(object):\n verbose_name = 'Drug Trial Result'\n\n drug_trial = models.ForeignKey(DrugTrial, on_delete=models.CASCADE)\n\n finding_name = models.ForeignKey(\n Finding,\n verbose_name='Finding',\n on_delete=models.CASCADE\n )\n\n finding_time = models.FloatField(verbose_name='Time', blank=True, null=True)\n\n time_units = models.ForeignKey(PhysicalUnits, blank=True, null=True, related_name='finding_time_units', on_delete=models.CASCADE)\n\n result = models.CharField(default='1',\n max_length=8,\n choices=POSNEG,\n verbose_name='Pos/Neg?')\n\n severity = 
models.CharField(default='-1',\n max_length=5,\n choices=SEVERITY_SCORE,\n verbose_name='Severity',\n blank=True)\n\n # May drop percent_min later, hide for now\n percent_min = models.FloatField(blank=True,\n null=True,\n verbose_name='Min Affected (% Population)')\n\n # May drop percent_max later, hide for now\n percent_max = models.FloatField(blank=True,\n null=True,\n verbose_name='Max Affected (% Population)')\n\n frequency = models.CharField(choices=FREQUENCIES,\n max_length=25,\n blank=True,\n default='')\n\n descriptor = models.ForeignKey(ResultDescriptor, blank=True, null=True, on_delete=models.CASCADE)\n\n value = models.FloatField(blank=True, null=True)\n\n value_units = models.ForeignKey(PhysicalUnits, blank=True, null=True, related_name='finding_value_units', on_delete=models.CASCADE)\n\n notes = models.CharField(max_length=2048, blank=True, default='')\n\n def get_absolute_url(self):\n return self.drug_trial.get_absolute_url()\n\n def __str__(self):\n treatments = []\n\n for treatment in self.findingtreatment_set.all():\n treatments.append(str(treatment))\n\n treatments = '; '.join(treatments)\n\n return '{}: {} for {}'.format(str(self.drug_trial), str(self.finding_name), treatments)\n\n\nclass FindingTreatment(models.Model):\n \"\"\"Finding Treatments are tied to Findings, and elaborate on the compounds and concentrations involved therein\"\"\"\n compound = models.ForeignKey('compounds.Compound', on_delete=models.CASCADE)\n finding_result = models.ForeignKey(FindingResult, on_delete=models.CASCADE)\n concentration = models.FloatField(blank=True, null=True)\n concentration_unit = models.ForeignKey(\n 'assays.PhysicalUnits',\n blank=True,\n null=True,\n verbose_name='Concentration Unit',\n on_delete=models.CASCADE\n )\n\n def __str__(self):\n if self.concentration:\n return '{} {} {}'.format(self.compound, self.concentration, self.concentration_unit)\n else:\n return '{}'.format(self.compound)\n\n\nclass AdverseEvent(models.Model):\n \"\"\"An Adverse Event describes an adverse event and what organ it affects\"\"\"\n event = models.CharField(max_length=100)\n organ = models.ForeignKey(Organ, blank=True, null=True, on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}'.format(self.event)\n\n\n# TODO think of a better name\n# TODO what other fields should be placed here?\n# Theoretically, we would place usage information here, but that is difficult to acquire\n# If we can't think of anything, scrap this model before you put it on production\nclass OpenFDACompound(LockableModel):\n \"\"\"An OpenFDACompound describes a compound with some data from OpenFDA\"\"\"\n class Meta(object):\n verbose_name = 'OpenFDA Report'\n\n compound = models.ForeignKey('compounds.Compound', on_delete=models.CASCADE)\n warnings = models.TextField(blank=True, default='')\n black_box = models.BooleanField(default=False)\n\n # Insights into non-human toxicology (can be useful)\n nonclinical_toxicology = models.TextField(blank=True, default='')\n\n # Deemed less than useful\n # clinical_studies = models.TextField(blank=True, default='')\n # Deemed less than useful\n # laboratory_tests = models.TextField(blank=True, default='')\n\n # For normalizing data, may change\n estimated_usage = models.IntegerField(blank=True, null=True)\n\n def __str__(self):\n return '{}'.format(self.compound.name)\n\n def get_absolute_url(self):\n return \"/adverse_events/{}/\".format(self.id)\n\n\nclass CompoundAdverseEvent(models.Model):\n \"\"\"A Compound Adverse Event describes an adverse event's frequency as brought on 
by a compound\"\"\"\n # CompoundAdverseEvents are inlines in OpenFDACompound (name subject to change)\n compound = models.ForeignKey('OpenFDACompound', on_delete=models.CASCADE)\n event = models.ForeignKey(AdverseEvent, on_delete=models.CASCADE)\n frequency = models.IntegerField()\n\n def __str__(self):\n return '{}:{}'.format(self.compound, self.event)\n","sub_path":"drugtrials/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":16514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"132576558","text":"\"\"\"Measure baseline performance.\n\nThe purpose of this script is to measure the baseline performance, i.e.\nthe performance that we obtain by looking at:\n\n 1. *All* species and their respective spectra; accumulating their\n classification performance\n\n 2. *Only* the species information without considering any spectra\n\"\"\"\n\nimport argparse\nimport dotenv\nimport joblib\nimport logging\nimport os\nimport json\nimport pathlib\nimport warnings\n\nimport numpy as np\n\nfrom maldi_learn.driams import DRIAMSDatasetExplorer\nfrom maldi_learn.driams import DRIAMSLabelEncoder\n\nfrom models import run_experiment\n\nfrom utilities import generate_output_filename\nfrom utilities import load_stratify_split_data\n\nfrom sklearn.preprocessing import OneHotEncoder\n\ndotenv.load_dotenv()\nDRIAMS_ROOT = os.getenv('DRIAMS_ROOT')\n\n# These parameters should remain fixed for this particular\n# experiment. We always train on the same data set, using\n# *all* available years.\nsite = 'DRIAMS-A'\nyears = ['2015', '2016', '2017', '2018']\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-a', '--antibiotic',\n type=str,\n help='Antibiotic for which to run the experiment',\n required=True,\n )\n\n parser.add_argument(\n '-m', '--model',\n default='lr',\n help='Selects model to use for subsequent training'\n )\n\n parser.add_argument(\n '-S', '--seed',\n type=int,\n help='Random seed to use for the experiment',\n required=True\n )\n\n name = 'baseline_case_based_stratification'\n\n parser.add_argument(\n '-o', '--output',\n default=pathlib.Path(__file__).resolve().parent.parent / 'results'\n / name,\n type=str,\n help='Output path for storing the results.'\n )\n\n parser.add_argument(\n '-f', '--force',\n action='store_true',\n help='If set, overwrites all files. Else, skips existing files.'\n )\n\n args = parser.parse_args()\n\n # Create the output directory for storing all results of the\n # individual combinations.\n os.makedirs(args.output, exist_ok=True)\n\n # Basic log configuration to ensure that we see where the process\n # spends most of its time.\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s %(message)s'\n )\n\n explorer = DRIAMSDatasetExplorer(DRIAMS_ROOT)\n metadata_fingerprints = explorer.metadata_fingerprints(\n site,\n id_suffix='strat'\n )\n\n logging.info(f'Site: {site}')\n logging.info(f'Years: {years}')\n logging.info(f'Seed: {args.seed}')\n logging.info(f'Antibiotic: {args.antibiotic}')\n\n X_train, y_train, X_test, y_test, meta_train, meta_test = load_stratify_split_data(\n DRIAMS_ROOT,\n site,\n years,\n '*',\n args.antibiotic,\n args.seed\n )\n\n logging.info(f'Loaded data set for {args.antibiotic}')\n print(meta_train.head())\n\n # Having loaded the data set, we have to generate two different\n # feature vectors:\n #\n # 1. The 'regular' feature vector as returned by our data set\n # loader. 
This necessitates no additional transformation.\n    #\n    # 2. The feature vector that we obtain by throwing away *all*\n    #    information about the spectra, leaving us only with sets\n    #    of one-hot-encoded species information.\n    #\n    # The purpose of the second experiment is to assess to what extent\n    # microbial resistance can be predicted based on information\n    # about the species.\n\n    ohe = OneHotEncoder(sparse=False)\n    species_vector = np.r_[meta_train['species'].values, meta_test['species'].values]\n    \n    ohe.fit(species_vector.reshape(-1,1))\n    X_species_train = ohe.transform(\n        meta_train['species'].values.reshape(-1, 1)\n    )\n    X_species_test = ohe.transform(\n        meta_test['species'].values.reshape(-1, 1)\n    )\n\n    logging.info('Created species-only feature vectors')\n\n    for [X_train, X_test], t in zip(\n        [[X_species_train, X_species_test], [X_train, X_test]], \n        ['no_spectra', '']\n    ):\n        # Prepare the output dictionary containing all information to\n        # reproduce the experiment.\n        output = {\n            'site': site,\n            'seed': args.seed,\n            'model': args.model,\n            'antibiotic': args.antibiotic,\n            'years': years,\n        }\n\n        # Add fingerprint information about the metadata files to make sure\n        # that the experiment is reproducible.\n        output['metadata_versions'] = metadata_fingerprints\n\n        output_filename = generate_output_filename(\n            args.output,\n            output,\n            suffix=t\n        )\n\n        # Add this information after generating a file name because\n        # I want it to be kept out of there. This is slightly hacky\n        # but only required for this one experiment.\n        output['species'] = 'all' if not t else 'all (w/o spectra)'\n\n        # Only write if we either are running in `force` mode, or the\n        # file does not yet exist.\n        if not os.path.exists(output_filename) or args.force:\n\n            n_folds = 5\n\n            results = run_experiment(\n                X_train, y_train,\n                X_test, y_test,\n                args.model,\n                n_folds,\n                random_state=args.seed,  # use seed whenever possible\n                verbose=True  # want info about best model etc.\n            )\n\n            output.update(results)\n\n            logging.info(f'Saving {os.path.basename(output_filename)}')\n\n            with open(output_filename, 'w') as f:\n                json.dump(output, f, indent=4)\n        else:\n            logging.warning(\n                f'Skipping {output_filename} because it already exists.'\n            )\n","sub_path":"amr_maldi_ml/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"563493100","text":"\r\n\"\"\" bitDP (DP over subsets encoded as bit flags) \"\"\"\r\n# dp[S][v] := the shortest distance, over the |S|! possible routes that start at vertex 0 and visit the subset S of {0,1,2,...,n-1}, of a route that ends at vertex v\r\n# Update rule: dp[S U {v}][v] = min( dp[S][u] + cost(u,v) )\r\n# Complexity O(n^2 * 2^n) (exhaustive search would be an enormous O(n!))\r\n\r\nv,e = map(int, input().split())\r\ng = [[float('inf')]*v for _ in range(v)]\r\nfor i in range(e):\r\n    s,t,d = map(int, input().split())\r\n    g[s-1][t-1] = d\r\ndp = [[float('inf')]*v for _ in range(2**v)] # each bit is binary, so dp needs length 2^v\r\ndp[0][0] = 0\r\n\r\nfor s in range(2**v):\r\n    for i in range(v): # receiving vertex\r\n        for j in range(v): # giving vertex\r\n\r\n            # the case where vertex j has not been visited, or no vertex has been visited yet\r\n            if not (s>>j)&1 and s!=0:\r\n                continue\r\n\r\n            # the case where vertex i has not been visited (vertex j has been)\r\n            if (s>>i)&1==0:\r\n\r\n                # update rule\r\n                if dp[s][j]+g[j][i] < dp[s|(1<<i)][i]:\r\n                    dp[s|(1<<i)][i] = dp[s][j]+g[j][i]\r\n\n    def all_subclasses(self, parent_cls) -> set:\n        return set(parent_cls.__subclasses__()).union(\n            [s for c in parent_cls.__subclasses__() for s in self.all_subclasses(c)])\n\n    def get_event(self, event, data):\n        super().get_event(event, data)\n        if self.selected_token_type:\n            if "mouse_left" in event:\n                if self.board.is_in_container(data[0], data[1]):\n                    keys = 
self.board.window.get_keys()\n if \"L_SHIFT\" in keys:\n for i in range(self.board.rows):\n for j in range(self.board.columns):\n self.selected_token_type((j, i))\n else:\n try:\n import miniworldmaker.board_positions.board_position as bp\n self.selected_token_type(position=bp.BoardPosition.from_pixel(data))\n except TypeError:\n print(\"Can't create tokens with more than one parameter position yet\")\n\n elif \"wheel_up\" in event or \"wheel_down\" in event:\n if self.board.is_in_container(data[0], data[1]):\n token = self.board.get_token_in_area(data)\n for cls in token.__class__.__mro__:\n if cls.__name__ == \"Actor\":\n if event == \"wheel_up\":\n token.turn_left(5)\n elif event == \"wheel_down\":\n token.turn_right(5)\n elif \"mouse_motion\" in event:\n if pygame.mouse.get_pressed()[0] == 1:\n if self.board.is_in_container(data[0], data[1]):\n rect = board_position.BoardPosition(data[0], data[1]).to_rect()\n token = self.board.get_tokens_at_rect(rect, singleitem=True)\n if token.__class__ != self.selected_token_type:\n import miniworldmaker.board_positions.board_position as bp\n token = self.selected_token_type(position=bp.BoardPosition.from_pixel(data))\n if \"mouse_right\" in event:\n if self.board.is_in_container(data[0], data[1]):\n keys = self.board.window.get_keys()\n if \"L_SHIFT\" in keys:\n tokens = self.board.get_tokens_by_pixel(data)\n while tokens:\n token = tokens.pop()\n token.remove()\n else:\n tokens = self.board.get_tokens_by_pixel(data)\n if tokens:\n tokens[0].turn_left(5)\n\n\nclass TokenButton(ToolbarWidget):\n\n def __init__(self, token_type, board, parent):\n super().__init__()\n self.parent = parent\n self.board = board\n # token = token_type(position = None)\n print(token_type, token_type.class_image)\n if token_type.class_image:\n self._img_path = token_type.class_image\n self._text_padding = 30\n self.set_text(\"Add \" + token_type.__name__)\n self.token_type = token_type\n self.background_color = (180, 180, 180, 255)\n\n def get_event(self, event, data):\n if event == \"mouse_left\":\n self.parent.window.send_event_to_containers(\"Selected actor\", self.token_type)\n self.parent.selected_token_type = self.token_type\n for widget in self.parent.widgets:\n if widget.__class__ == TokenButton:\n widget.background_color = (180, 180, 180, 255)\n widget.dirty = 1\n self.background_color = (100, 100, 100, 255)\n self.dirty = 1\n","sub_path":"source/miniworldmaker/containers/level_designer_toolbar.py","file_name":"level_designer_toolbar.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"545780882","text":"def s_shape(l):\n D = Device()\n wg = D << mn.waveguide(width=w,length=l).movex(-l/2).rotate(90)\n b1 = D << pg.turn(wg.ports[1],angle=-90,radius=bend,angle_resolution=2)\n b2 = D << pg.turn(wg.ports[2],angle=-90,radius=bend,angle_resolution=2)\n# D << mn.waveguide()\n return D\n\nD = Device('simple')\n\nl = 8e3\nwg0 = D << mn.waveguide(w,2*l).movex(-l)\n\nbend = 100\nw = 1\n\nstep = 200\nys = 0\nfor i in range(4):\n ys += (i+1)*step\n S = Device('S_shape')\n h=i*step\n S << pg.copy(s_shape(h))\n S << mn.waveguide(w,l-bend).move((bend,h/2+bend))\n S << mn.waveguide(w,l-bend).move((-l,-h/2-bend))\n D << S.movey(ys)\n\nD.flatten()\nqp(D)\nD.write_gds('simple.gds')\n\n# D = Device('simple')\n\n# l = 5e3\n\n# bend = 100\n# w = 1\n\n# wg0 = D << mn.waveguide(w,2*l).movex(-l)\n\n\n# step = 200\n# ys = 0\n# for i in range(4):\n# ys += (i+1)*step\n# S = Device('S_shape')\n# 
h=i*step\n# S << pg.copy(s_shape(h))\n# S << mn.waveguide(w,l-bend).move((bend,h/2+bend))\n# S << mn.waveguide(w,l-bend).move((-l,-h/2-bend))\n# D << S.movey(ys)\n\n# D.flatten()\n\n# D_set = Device('final')\n\n# D_set << pg.deepcopy(D)\n# D_set << pg.deepcopy(D).movey(3e3)\n# D_set << pg.deepcopy(D).movey(6e3)\n\n# qp(D_set)\n# D_set.write_gds('simple_set.gds')\n","sub_path":"work_log/zshape_array.py","file_name":"zshape_array.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266759574","text":"import numpy as np\nimport pandas as pd\n\n# import geopandas as gpd\nfrom IPython.display import Image\n\n# from shapely.geometry import Point, Polygon\nimport time, datetime\nimport scipy, math\nfrom math import factorial\n\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\nfrom sklearn.linear_model import LinearRegression\nfrom patsy import cr\n\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\nimport seaborn as sb\n\nimport os, os.path, sys\n\nfrom tensorflow.keras.utils import to_categorical, load_img, img_to_array\n\n# from keras.models import Sequential, Model, load_model\n# from keras.applications.vgg16 import VGG16\n# import tensorflow as tf\n\n# # from keras.optimizers import SGD\n# from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D\n# from tensorflow.keras.optimizers import SGD\n# from keras.preprocessing.image import ImageDataGenerator\n\n###\n### These will be more generalized functions of remote_sensing_core.py\n### Hence, less hard coding, which implies column/variavle wise we\n### will be minimalistic. e.g. column: lastSurveydate should not be included\n### here.\n###\n\n###########################################################\n\n\ndef regularize_a_field_annual_basis(\n a_df, V_idks=\"NDVI\", interval_size=10, start_year=2008, end_year=2021\n):\n \"\"\"\n This is a modification of regularize_a_field() function.\n The update is that this function is \"less flexible\"!!!\n Here we will have intervals go from Jan 1-Jan 10, and so on.\n In other words, the time origin is Jan 1.\n\n In the regularize_a_field() function the origin of time\n was the first data point! So, we ended up having 36 data for\n some years and 37 for some other when we were looking at 3 years of data!\n The root cause was that ML was not part of the plan.... same old, same old!\n \"\"\"\n \"\"\"Returns a dataframe where data points are interval_size-day apart.\n This function regularizes the data between the minimum and maximum dates\n present in the data. 
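Intervals that contain no image are filled with the sentinel value -1.5.\n\n    Example (editor's sketch; assumes a_df has columns ID, human_system_start_time and an NDVI column)\n    -------\n    >>> reg = regularize_a_field_annual_basis(a_df, V_idks=\"NDVI\", interval_size=10)\n    >>> # one row per 10-day step per year; NDVI = max over that window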
\n\n Arguments\n ---------\n a_df : dataframe \n of a given field for only one satellite\n\n Returns\n -------\n regularized_df : dataframe\n \"\"\"\n if not (\"human_system_start_time\" in a_df.columns):\n a_df = add_human_start_time_by_system_start_time(a_df)\n\n a_df[\"human_system_start_time\"] = pd.to_datetime(a_df[\"human_system_start_time\"])\n a_df.sort_values(by=\"human_system_start_time\", inplace=True)\n a_df.reset_index(drop=True, inplace=True)\n\n assert len(a_df.ID.unique()) == 1\n # assert (len(a_df.dataset.unique()) == 1)\n #\n # see how many days there are between the first and last image\n #\n a_df_coverage_days = (\n max(a_df.human_system_start_time) - min(a_df.human_system_start_time)\n ).days\n assert a_df_coverage_days >= interval_size\n\n # see how many data points we need.\n all_years = sorted(a_df.human_system_start_time.dt.year.unique())\n no_steps_per_year = 365 // interval_size\n no_steps = len(all_years) * no_steps_per_year\n\n \"\"\"\n I am reducing the flexibility of the code we had before!\n I want to make it that all fields have the same exact dates\n for their time steps. Jan. 1, Jan 10, ...\n \"\"\"\n regular_time_stamps = []\n for a_year in all_years:\n regular_time_stamps = regular_time_stamps + list(\n pd.date_range(\n pd.Timestamp(str(a_year) + \"-01-01\"),\n pd.Timestamp(str(a_year) + \"-12-25\"),\n freq=str(interval_size) + \"D\",\n )\n )\n\n # initialize output dataframe\n if \"dataset\" in a_df.columns:\n regular_cols = [\"ID\", \"dataset\", \"human_system_start_time\", V_idks]\n else:\n regular_cols = [\"ID\", \"human_system_start_time\", V_idks]\n\n regular_df = pd.DataFrame(data=None, index=np.arange(no_steps), columns=regular_cols)\n\n regular_df[\"ID\"] = a_df.ID.unique()[0]\n if \"dataset\" in a_df.columns:\n regular_df[\"dataset\"] = a_df.dataset.unique()[0]\n\n if len(regular_time_stamps) == no_steps + 1:\n regular_df.human_system_start_time = regular_time_stamps[:-1]\n elif len(regular_time_stamps) == no_steps:\n regular_df.human_system_start_time = regular_time_stamps\n else:\n raise ValueError(\n f\"There is a mismatch between no. 
days needed and '{interval_size}-day' interval array!\"\n )\n\n # Pick the maximum of every interval_size-days\n for start_date in regular_df.human_system_start_time:\n \"\"\"\n The following will crate an array (of length 2)\n it goes from a day to 10 days later; end points of the interval_size-day interval.\n\n # Here we add 1 day to the right end point (end_date)\n because the way pandas/python slices the dataframe;\n does not include the last row of sub-dataframe\n \"\"\"\n dateRange = pd.date_range(\n start_date, start_date + pd.Timedelta(days=interval_size - 1), freq=str(1) + \"D\"\n )\n assert len(dateRange) == interval_size\n\n curr_time_window = a_df[a_df.human_system_start_time.isin(dateRange)]\n if len(curr_time_window) == 0:\n regular_df.loc[regular_df.human_system_start_time == start_date, V_idks] = -1.5\n else:\n regular_df.loc[regular_df.human_system_start_time == start_date, V_idks] = max(\n curr_time_window[V_idks]\n )\n ##### end the for-loop\n regular_df.reset_index(drop=True, inplace=True)\n return regular_df\n\n\ndef create_calendar_table(SF_year):\n start = str(SF_year) + \"-01-01\"\n end = str(SF_year) + \"-12-31\"\n\n df = pd.DataFrame({\"human_system_start_time\": pd.date_range(start, end)})\n\n # add day of year\n df[\"doy\"] = 1 + np.arange(len(df))\n\n # df['Weekday'] = df['Date'].dt.day_name()\n\n # Drop the last element if the year is leap-year.\n # we want the data to have equal size\n if len(df) == 366:\n df.drop(index=365, axis=0, inplace=True)\n return df\n\n\ndef filter_out_NASS(dt_df):\n dt_cf_NASS = dt_df.copy()\n dt_cf_NASS[\"DataSrc\"] = dt_cf_NASS[\"DataSrc\"].astype(str)\n dt_cf_NASS[\"DataSrc\"] = dt_cf_NASS[\"DataSrc\"].str.lower()\n\n dt_cf_NASS = dt_cf_NASS[~dt_cf_NASS[\"DataSrc\"].str.contains(\"nass\")]\n return dt_cf_NASS\n\n\ndef filter_by_lastSurvey(dt_df_su, year):\n dt_surv = dt_df_su.copy()\n dt_surv = dt_surv[dt_surv[\"LstSrvD\"].str.contains(str(year))]\n return dt_surv\n\n\ndef filter_out_nonIrrigated(dt_df_irr):\n dt_irrig = dt_df_irr.copy()\n #\n # drop NA rows in irrigation column\n #\n dt_irrig.dropna(subset=[\"Irrigtn\"], inplace=True)\n\n dt_irrig[\"Irrigtn\"] = dt_irrig[\"Irrigtn\"].astype(str)\n\n dt_irrig[\"Irrigtn\"] = dt_irrig[\"Irrigtn\"].str.lower()\n dt_irrig = dt_irrig[~dt_irrig[\"Irrigtn\"].str.contains(\"none\")]\n dt_irrig = dt_irrig[~dt_irrig[\"Irrigtn\"].str.contains(\"unknown\")]\n dt_irrig = dt_irrig[~dt_irrig[\"Irrigtn\"].str.contains(\"empty\")]\n\n return dt_irrig\n\n\ndef Null_SOS_EOS_by_DoYDiff(pd_TS, min_season_length=40):\n \"\"\"\n input: pd_TS is a pandas dataframe\n it includes a column SOS and a column EOS\n\n output: create a vector that measures distance between DoY\n of an SOS and corresponding EOS.\n\n It is possible that the number of one of the SOS and EOS is\n different from the other. (perhaps just by 1)\n\n So, we need to keep that in mind.\n \"\"\"\n pd_TS_DoYDiff = pd_TS.copy()\n\n # find indexes of SOS and EOS\n SOS_indexes = pd_TS_DoYDiff.index[pd_TS_DoYDiff[\"SOS\"] != 0].tolist()\n EOS_indexes = pd_TS_DoYDiff.index[pd_TS_DoYDiff[\"EOS\"] != 0].tolist()\n\n \"\"\"\n It seems it is possible to only have 1 SOS with no EOS. (or vice versa).\n In this case we can consider we only have 1 season!\n \"\"\"\n \"\"\"\n We had the following in the code, which is fine for computing \n the tables (since we count the seasons by counting SOS), but, if \n there is no SOS and only 1 EOS, then the EOS will not be nullified. 
and will show\n    up in the plots.\n\n    if len(SOS_indexes) == 0 or len(EOS_indexes) == 0:\n        return pd_TS_DoYDiff\n    \"\"\"\n    # if len(SOS_indexes) == 0 or len(EOS_indexes) == 0:\n    #     return pd_TS_DoYDiff\n\n    if len(SOS_indexes) == 0:\n        if len(EOS_indexes) == 0:\n            return pd_TS_DoYDiff\n        else:\n            if len(EOS_indexes) == 1:\n                EOS_indexes[0] = 0\n                pd_TS_DoYDiff.EOS = 0\n                return pd_TS_DoYDiff\n\n            else:\n                raise ValueError(\"too many EOS and no SOS whatsoever!\")\n\n    if len(EOS_indexes) == 0:\n        if len(SOS_indexes) == 1:\n            return pd_TS_DoYDiff\n        else:\n            raise ValueError(\"too many SOS and no EOS whatsoever!\")\n\n    SOS_indexes = pd_TS_DoYDiff.index[pd_TS_DoYDiff[\"SOS\"] != 0].tolist()\n    EOS_indexes = pd_TS_DoYDiff.index[pd_TS_DoYDiff[\"EOS\"] != 0].tolist()\n\n    \"\"\"\n    First we need to fix problems such as having 2 SOS and only 1 EOS,\n    or 2 EOS and only 1 SOS. It is also possible that the number of SOSs\n    and the number of EOSs are identical, but the series starts with an EOS\n    and ends with an SOS.\n    \"\"\"\n    #\n    # Check if first EOS is less than first SOS\n    #\n    SOS_pointer = SOS_indexes[0]\n    EOS_pointer = EOS_indexes[0]\n    if (\n        pd_TS_DoYDiff.loc[EOS_pointer, \"human_system_start_time\"]\n        < pd_TS_DoYDiff.loc[SOS_pointer, \"human_system_start_time\"]\n    ):\n        # Remove the false EOS from dataFrame\n        pd_TS_DoYDiff.loc[EOS_pointer, \"EOS\"] = 0\n\n        # remove the first element of EOS indexes\n        EOS_indexes.pop(0)\n\n    #\n    # Check if last SOS is greater than last EOS\n    #\n    if len(SOS_indexes) == 0 or len(EOS_indexes) == 0:\n        return pd_TS_DoYDiff\n\n    SOS_pointer = SOS_indexes[-1]\n    EOS_pointer = EOS_indexes[-1]\n    if (\n        pd_TS_DoYDiff.loc[EOS_pointer, \"human_system_start_time\"]\n        < pd_TS_DoYDiff.loc[SOS_pointer, \"human_system_start_time\"]\n    ):\n        # Remove the false SOS from dataFrame\n        pd_TS_DoYDiff.loc[SOS_pointer, \"SOS\"] = 0\n\n        # remove the last element of SOS indexes\n        SOS_indexes.pop()\n\n    if len(SOS_indexes) != len(EOS_indexes):\n        #\n        # in this case we have an extra SOS (at the end) or EOS (at the beginning)\n        #\n        print(\"Error occurred at {}.\".format(pd_TS.ID.unique()[0]))\n        # print (pd_TS.image_year.unique()[0])\n        raise ValueError(\"SOS and EOS are not of the same length.\")\n\n    \"\"\"\n    Go through seasons and invalidate them if their length is too short\n    \"\"\"\n    for ii in np.arange(len(SOS_indexes)):\n        SOS_pointer = SOS_indexes[ii]\n        EOS_pointer = EOS_indexes[ii]\n\n        current_growing_season_Length = (\n            pd_TS_DoYDiff.loc[EOS_pointer, \"human_system_start_time\"]\n            - pd_TS_DoYDiff.loc[SOS_pointer, \"human_system_start_time\"]\n        ).days\n\n        # Kill/invalidate season if its length is too short.\n        if current_growing_season_Length < min_season_length:\n            pd_TS_DoYDiff.loc[SOS_pointer, \"SOS\"] = 0\n            pd_TS_DoYDiff.loc[EOS_pointer, \"EOS\"] = 0\n\n    return pd_TS_DoYDiff\n\n\ndef addToDF_SOS_EOS_White(pd_TS, VegIdx=\"EVI\", onset_thresh=0.3, offset_thresh=0.3):\n    \"\"\"\n    In this method the NDVI_Ratio = (NDVI - NDVI_min) / (NDVI_Max - NDVI_min)\n    is computed.\n\n    SOS or onset is when NDVI_ratio exceeds the onset-threshold\n    and EOS is when NDVI_ratio drops below the offset-threshold.\n    \"\"\"\n    pandaFrame = pd_TS.copy()\n\n    VegIdx_min = pandaFrame[VegIdx].min()\n    VegIdx_max = pandaFrame[VegIdx].max()\n    VegRange = VegIdx_max - VegIdx_min + sys.float_info.epsilon\n\n    colName = VegIdx + \"_ratio\"\n    pandaFrame[colName] = (pandaFrame[VegIdx] - VegIdx_min) / VegRange\n\n    # if (onset_thresh == offset_thresh):\n    #     SOS_EOS_candidates = pandaFrame[colName] - onset_thresh\n    #     sign_change = 
find_signChange_locs_EqualOnOffset(SOS_EOS_candidates.values)\n    # else:\n    #     SOS_candidates = pandaFrame[colName] - onset_thresh\n    #     EOS_candidates = offset_thresh - pandaFrame[colName]\n    #     sign_change = find_signChange_locs_DifferentOnOffset(SOS_candidates.values, EOS_candidates.values)\n    # pandaFrame['SOS_EOS'] = sign_change * pandaFrame[VegIdx]\n\n    SOS_candidates = pandaFrame[colName] - onset_thresh\n    EOS_candidates = offset_thresh - pandaFrame[colName]\n\n    BOS, EOS = find_signChange_locs_DifferentOnOffset(SOS_candidates, EOS_candidates)\n    pandaFrame[\"SOS\"] = BOS * pandaFrame[VegIdx]\n    pandaFrame[\"EOS\"] = EOS * pandaFrame[VegIdx]\n\n    return pandaFrame\n\n\ndef find_signChange_locs_DifferentOnOffset(SOS_candids, EOS_candids):\n    if type(SOS_candids) != np.ndarray:\n        SOS_candids = SOS_candids.values\n\n    if type(EOS_candids) != np.ndarray:\n        EOS_candids = EOS_candids.values\n\n    SOS_sign_change = np.zeros(len(SOS_candids))\n    EOS_sign_change = np.zeros(len(EOS_candids))\n\n    pointer = 0\n    for pointer in np.arange(0, len(SOS_candids) - 1):\n        \"\"\"\n        On Feb. 23, 2023 we came upon a rare case where SOS_candids[pointer+1] was exactly zero!\n        So, we changed the line if SOS_candids[pointer+1] >0: to if SOS_candids[pointer+1] >= 0:.\n        \"\"\"\n\n        if SOS_candids[pointer] < 0:\n            if SOS_candids[pointer + 1] >= 0:\n                # if SOS_candids[pointer]*SOS_candids[pointer+1]<=0:\n                SOS_sign_change[pointer + 1] = 1\n\n        if EOS_candids[pointer] < 0:\n            if EOS_candids[pointer + 1] >= 0:\n                # if EOS_candids[pointer]*EOS_candids[pointer+1]<=0:\n                EOS_sign_change[pointer + 1] = 1\n\n    # sign_change = SOS_sign_change + EOS_sign_change\n    return (SOS_sign_change, EOS_sign_change)\n\n\ndef correct_big_jumps_1DaySeries_JFD(dataTMS_jumpie, give_col, maxjump_perDay=0.015):\n    \"\"\"\n    This is a modified version of correct_big_jumps_1DaySeries().\n    Here, if the big jumps happen in Dec., Jan., or Feb., we take the high value down\n    (as opposed to bringing the lower value up).\n\n    Returns: a dataframe with no big jumps in it\n    Arguments\n    ---------\n    dataTMS_jumpie : dataframe\n        A dataframe in which big jumps may be present.\n\n    give_col : String\n        A string indicating which column/VI should be corrected.\n\n    Returns\n    -------\n    dataTMS_jumpie : dataframe\n        the same dataframe with no big jumps! (just one iteration)\n    \"\"\"\n    # dataTMS_jumpie = initial_clean(df = dataTMS_jumpie, column_to_be_cleaned = give_col)\n\n    dataTMS_jumpie[\"human_system_start_time\"] = pd.to_datetime(\n        dataTMS_jumpie[\"human_system_start_time\"]\n    )\n    dataTMS_jumpie.sort_values(by=[\"human_system_start_time\"], inplace=True)\n    dataTMS_jumpie.reset_index(drop=True, inplace=True)\n\n    thyme_vec = dataTMS_jumpie[\"human_system_start_time\"].values.copy()\n    Veg_indks = dataTMS_jumpie[give_col].values.copy()\n\n    time_diff = (\n        pd.to_datetime(thyme_vec[1:]) - pd.to_datetime(thyme_vec[0 : len(thyme_vec) - 1])\n    ).days\n\n    Veg_indks_diff = Veg_indks[1:] - Veg_indks[0 : len(thyme_vec) - 1]\n    jump_indexes = np.where(Veg_indks_diff > maxjump_perDay)\n    jump_indexes = jump_indexes[0]\n    jump_indexes = jump_indexes.tolist()\n\n    thyme_vec = dataTMS_jumpie[\"human_system_start_time\"].values.copy()\n    Veg_indks = dataTMS_jumpie[give_col].values.copy()\n    time_diff = thyme_vec[1:] - thyme_vec[0 : len(thyme_vec) - 1]\n\n    # time_diff_in_days = time_diff / 86400\n    time_diff_in_days = time_diff.astype(\"timedelta64[D]\")\n    time_diff_in_days = time_diff_in_days.astype(int)\n\n    # It is possible that the very first one has a big jump in it.\n    # we cannot interpolate this. 
so, let's just skip it.\n    if len(jump_indexes) > 0:\n        if jump_indexes[0] == 0:\n            jump_indexes.pop(0)\n\n    if len(jump_indexes) > 0:\n        for jp_idx in jump_indexes:\n            # for count, jp_idx in enumerate(jump_indexes):\n            # Veg_indks_diff >= (time_diff_in_days * maxjump_perDay)\n            if Veg_indks_diff[jp_idx] >= (time_diff_in_days[jp_idx] * maxjump_perDay):\n                #\n                # form a line using the adjacent points of the big jump:\n                #\n                if pd.to_datetime(thyme_vec[jp_idx]).month in [1, 2, 12]:\n                    # take the big value down in Jan, Feb, or Dec.\n                    \"\"\"\n                    It is possible that the big jump is the last data point.\n                    In this case, let it go!\n                    Perhaps we can do this in a faster way: remove the indices from jump_indexes\n                    above, rather than checking the if statement below. Or maybe not?\n                    We have 2 cases here: Jan-Feb-Dec and other months!\n                    \"\"\"\n                    if (jp_idx + 2) < len(Veg_indks):\n                        x1, y1 = thyme_vec[jp_idx], Veg_indks[jp_idx]\n                        x2, y2 = thyme_vec[jp_idx + 2], Veg_indks[jp_idx + 2]\n\n                        m = float(y2 - y1) / (x2 - x1).astype(pd.Timedelta)  # slope or float(x2-x1)\n                        b = y2 - (m * int(x2))  # intercept\n\n                        # replace the big jump with linear interpolation\n                        # only if the new value is smaller than it was in the raw data\n                        new_val = m * thyme_vec[jp_idx + 1].astype(int) + b\n                        if new_val < Veg_indks[jp_idx + 1]:\n                            Veg_indks[jp_idx + 1] = new_val\n                else:\n                    \"\"\"\n                    It is possible that the big jump is the last data point,\n                    or the first one. In these cases let it go!!!\n                    \"\"\"\n                    if (jp_idx + 1) < len(Veg_indks) and (jp_idx - 1) >= 0:\n                        # bring the low value up, outside of (Jan, Feb, Dec)\n                        x1, y1 = thyme_vec[jp_idx - 1], Veg_indks[jp_idx - 1]\n                        x2, y2 = thyme_vec[jp_idx + 1], Veg_indks[jp_idx + 1]\n\n                        m = float(y2 - y1) / (x2 - x1).astype(pd.Timedelta)  # slope or float(x2-x1)\n                        b = y2 - (m * int(x2))  # intercept\n\n                        # replace the big jump with linear interpolation\n                        Veg_indks[jp_idx] = m * thyme_vec[jp_idx].astype(int) + b\n\n    dataTMS_jumpie[give_col] = Veg_indks\n    return dataTMS_jumpie\n\n\ndef correct_big_jumps_1DaySeries(dataTMS_jumpie, give_col, maxjump_perDay=0.015):\n    \"\"\"\n    in the function correct_big_jumps_preDefinedJumpDays(.) 
we have\n    to define the jump_amount and the no_days_between_points.\n    For example, if we have a jump of more than 0.4 in less than 20 days, then\n    that is an outlier detected.\n\n    Here we modify the approach to be flexible in the following sense:\n    if the amount of increase in NDVI is more than #_of_Days * 0.02 then\n    an outlier is detected and we need interpolation.\n\n    0.015 came from the SG based paper that used a 0.4 jump in NDVI over 20 days.\n    That translates into 0.02 = 0.4 / 20 per day.\n    But we chose 0.015 as the default.\n    \"\"\"\n    dataTMS_jumpie = initial_clean(df=dataTMS_jumpie, column_to_be_cleaned=give_col)\n\n    dataTMS_jumpie[\"human_system_start_time\"] = pd.to_datetime(\n        dataTMS_jumpie[\"human_system_start_time\"]\n    )\n    dataTMS_jumpie.sort_values(by=[\"human_system_start_time\"], inplace=True)\n    dataTMS_jumpie.reset_index(drop=True, inplace=True)\n\n    thyme_vec = dataTMS_jumpie[\"human_system_start_time\"].values.copy()\n    Veg_indks = dataTMS_jumpie[give_col].values.copy()\n\n    time_diff = (\n        pd.to_datetime(thyme_vec[1:]) - pd.to_datetime(thyme_vec[0 : len(thyme_vec) - 1])\n    ).days\n\n    Veg_indks_diff = Veg_indks[1:] - Veg_indks[0 : len(thyme_vec) - 1]\n    jump_indexes = np.where(Veg_indks_diff > maxjump_perDay)\n    jump_indexes = jump_indexes[0]\n    jump_indexes = jump_indexes.tolist()\n\n    thyme_vec = dataTMS_jumpie[\"human_system_start_time\"].values.copy()\n    Veg_indks = dataTMS_jumpie[give_col].values.copy()\n    time_diff = thyme_vec[1:] - thyme_vec[0 : len(thyme_vec) - 1]\n\n    # time_diff_in_days = time_diff / 86400\n    time_diff_in_days = time_diff.astype(\"timedelta64[D]\")\n    time_diff_in_days = time_diff_in_days.astype(int)\n\n    # It is possible that the very first one has a big jump in it.\n    # we cannot interpolate this. so, let's just skip it.\n    if len(jump_indexes) > 0:\n        if jump_indexes[0] == 0:\n            jump_indexes.pop(0)\n    if len(jump_indexes) > 0:\n        for jp_idx in jump_indexes:\n            if Veg_indks_diff[jp_idx] >= (time_diff_in_days[jp_idx] * maxjump_perDay):\n                #\n                # form a line using the adjacent points of the big jump:\n                #\n                x1, y1 = thyme_vec[jp_idx - 1], Veg_indks[jp_idx - 1]\n                x2, y2 = thyme_vec[jp_idx + 1], Veg_indks[jp_idx + 1]\n                if (x2 - x1).astype(pd.Timedelta) == 0:\n                    print(jp_idx)\n                m = float(y2 - y1) / (x2 - x1).astype(pd.Timedelta)  # slope or float(x2-x1)\n                b = y2 - (m * int(x2))  # intercept\n\n                # replace the big jump with linear interpolation\n                Veg_indks[jp_idx] = m * thyme_vec[jp_idx].astype(int) + b\n\n    dataTMS_jumpie[give_col] = Veg_indks\n    return dataTMS_jumpie\n\n\ndef interpolate_outliers_EVI_NDVI(outlier_input, given_col):\n    \"\"\"\n    outliers are those that are beyond boundaries. For example, an EVI value of 2.\n    A big jump in the other function means we have a big jump but we are still\n    within the region of EVI values. If in 20 days we have a jump of 0.3 then that is noise.\n\n    In the 2017 data I did not see outliers in NDVI. It only happened in EVI.\n    \"\"\"\n    outlier_input = initial_clean(df=outlier_input, column_to_be_cleaned=given_col)\n\n    outlier_input[\"human_system_start_time\"] = pd.to_datetime(\n        outlier_input[\"human_system_start_time\"]\n    )\n    assert len(outlier_input.ID.unique()) == 1\n\n    # ID below is for sanity check. 
otherwise the input must be one field\n    outlier_input.sort_values(by=[\"ID\", \"human_system_start_time\"], inplace=True)\n    outlier_input.reset_index(drop=True, inplace=True)\n\n    # 1st block\n    time_vec = outlier_input[\"human_system_start_time\"].values.copy()\n    vec = outlier_input[given_col].values.copy()\n\n    # find out where the outliers are\n    high_outlier_inds = np.where(vec > 1)[0]\n    low_outlier_inds = np.where(vec < -1)[0]\n\n    all_outliers_idx = np.concatenate((high_outlier_inds, low_outlier_inds))\n    all_outliers_idx = np.sort(all_outliers_idx)\n    non_outiers = np.arange(len(vec))[~np.in1d(np.arange(len(vec)), all_outliers_idx)]\n\n    # 2nd block\n    if len(all_outliers_idx) == 0:\n        return outlier_input\n\n    \"\"\"\n    It is possible that for a field we only have, say, 2 data points\n    where all the EVI/NDVI values are outliers. Then, there is nothing to\n    use for interpolation. So, we return an empty dataframe.\n    \"\"\"\n    if len(all_outliers_idx) == len(outlier_input):\n        outlier_input = initial_clean(df=outlier_input, column_to_be_cleaned=given_col)\n        outlier_input = outlier_input[outlier_input[given_col] < 1.5]\n        outlier_input = outlier_input[outlier_input[given_col] > -1.5]\n        return outlier_input\n\n    # 3rd block\n\n    # Get rid of outliers that are at the beginning of the time series\n    # if len(non_outiers) > 0 :\n    if non_outiers[0] > 0:\n        vec[0 : non_outiers[0]] = vec[non_outiers[0]]\n\n    # find out where the outliers are\n    high_outlier_inds = np.where(vec > 1)[0]\n    low_outlier_inds = np.where(vec < -1)[0]\n\n    all_outliers_idx = np.concatenate((high_outlier_inds, low_outlier_inds))\n    all_outliers_idx = np.sort(all_outliers_idx)\n    non_outiers = np.arange(len(vec))[~np.in1d(np.arange(len(vec)), all_outliers_idx)]\n    if len(all_outliers_idx) == 0:\n        outlier_input[given_col] = vec\n        return outlier_input\n\n    # 4th block\n    # Get rid of outliers that are at the end of the time series\n    if non_outiers[-1] < (len(vec) - 1):\n        vec[non_outiers[-1] :] = vec[non_outiers[-1]]\n\n    # find out where the outliers are\n    high_outlier_inds = np.where(vec > 1)[0]\n    low_outlier_inds = np.where(vec < -1)[0]\n\n    all_outliers_idx = np.concatenate((high_outlier_inds, low_outlier_inds))\n    all_outliers_idx = np.sort(all_outliers_idx)\n    non_outiers = np.arange(len(vec))[~np.in1d(np.arange(len(vec)), all_outliers_idx)]\n    if len(all_outliers_idx) == 0:\n        outlier_input[given_col] = vec\n        return outlier_input\n    \"\"\"\n    At this point the outliers are in the middle of the vector,\n    and the beginning and the end of the vector are clean.\n    \"\"\"\n    for out_idx in all_outliers_idx:\n        \"\"\"\n        Right here at the beginning we should check\n        if vec[out_idx] is an outlier or not. 
The reason is that\n        there might be consecutive outliers at position m and m+1\n        and we fix the one at m+1 when we are fixing m ...\n        \"\"\"\n        # if ~(vec[out_idx] <= 1 and vec[out_idx] >= -1):\n        if vec[out_idx] >= 1 or vec[out_idx] <= -1:\n            left_pointer = out_idx - 1\n            right_pointer = out_idx + 1\n            while ~(vec[right_pointer] <= 1 and vec[right_pointer] >= -1):\n                right_pointer += 1\n\n            # form the line and fill in the outlier values\n            x1, y1 = time_vec[left_pointer], vec[left_pointer]\n            x2, y2 = time_vec[right_pointer], vec[right_pointer]\n\n            time_diff = x2 - x1\n            y_diff = y2 - y1\n\n            slope = y_diff / time_diff.astype(pd.Timedelta)\n            intercept = y2 - (slope * int(x2))\n            vec[left_pointer + 1 : right_pointer] = (\n                slope * ((time_vec[left_pointer + 1 : right_pointer]).astype(int)) + intercept\n            )\n    outlier_input[given_col] = vec\n    return outlier_input\n\n\ndef initial_clean(df, column_to_be_cleaned):\n    # dt_copy = df.copy()\n    # remove the useless system:index column\n    if \"system:index\" in list(df.columns):\n        df = df.drop(columns=[\"system:index\"])\n    df.drop_duplicates(inplace=True)\n\n    if \"human_system_start_time\" in df.columns:\n        df[\"human_system_start_time\"] = pd.to_datetime(df[\"human_system_start_time\"])\n\n    # Drop rows with NA in the column_to_be_cleaned column.\n    df = df[df[column_to_be_cleaned].notna()]\n\n    if column_to_be_cleaned in [\"NDVI\", \"EVI\"]:\n        df.loc[df[column_to_be_cleaned] > 1, column_to_be_cleaned] = 1.5\n        df.loc[df[column_to_be_cleaned] < -1, column_to_be_cleaned] = -1.5\n    return df\n\n\ndef fill_theGap_linearLine(a_regularized_TS, V_idx=\"NDVI\"):\n    \"\"\"Returns a dataframe that has replaced the missing parts of regular_TS.\n\n    Arguments\n    ---------\n    regular_TS : dataframe\n        A regularized (data points are equidistant from each other) dataframe\n        with missing data points; -1.5 is an indication of missing values.\n        This dataframe is the output of the function regularize_a_field(.)\n        We will assume the regular_TS is for a given unique field from a given unique satellite.\n\n    V_idx : String\n        A string indicating which column/VI should be filled in.\n\n    Returns\n    -------\n    regular_TS : dataframe\n        the same dataframe with missing data points filled in by linear interpolation\n    \"\"\"\n    # a_regularized_TS = regular_TS.copy()\n\n    a_regularized_TS[\"human_system_start_time\"] = pd.to_datetime(\n        a_regularized_TS[\"human_system_start_time\"]\n    )\n    TS_array = a_regularized_TS[V_idx].copy().values\n\n    # aaa = a_regularized_TS[\"human_system_start_time\"].values[1]\n    # bbb = a_regularized_TS[\"human_system_start_time\"].values[0]\n    # time_step_size = (aaa - bbb).astype('timedelta64[D]')/np.timedelta64(1, 'D')\n\n    \"\"\"\n    -1.5 is an indicator of missing values, i.e. 
a gap.\n    The -1.5 was used as an indicator in the function regularize_movingWindow_windowSteps_2Yrs()\n    \"\"\"\n    missing_indicies = np.where(TS_array == -1.5)[0]\n    Notmissing_indicies = np.where(TS_array != -1.5)[0]\n\n    #\n    # Check if the first or last k values are missing;\n    # if so, replace them with a proper number and shorten the task\n    #\n    left_pointer = Notmissing_indicies[0]\n    right_pointer = Notmissing_indicies[-1]\n\n    if left_pointer > 0:\n        TS_array[:left_pointer] = TS_array[left_pointer]\n\n    if right_pointer < (len(TS_array) - 1):\n        TS_array[right_pointer:] = TS_array[right_pointer]\n    #\n    # update indexes.\n    #\n    missing_indicies = np.where(TS_array == -1.5)[0]\n    Notmissing_indicies = np.where(TS_array != -1.5)[0]\n\n    # left_pointer = Notmissing_indicies[0]\n    stop = right_pointer\n    right_pointer = left_pointer + 1\n\n    missing_indicies = np.where(TS_array == -1.5)[0]\n\n    while len(missing_indicies) > 0:\n        left_pointer = missing_indicies[0] - 1\n        left_value = TS_array[left_pointer]\n        right_pointer = missing_indicies[0]\n\n        while TS_array[right_pointer] == -1.5:\n            right_pointer += 1\n        right_value = TS_array[right_pointer]\n\n        if (right_pointer - left_pointer) == 2:\n            # if there is a single gap, just use the average of the two\n            # neighboring values.\n            # Avoids extra computation!\n            #\n            TS_array[left_pointer + 1] = 0.5 * (TS_array[left_pointer] + TS_array[right_pointer])\n            missing_indicies = np.where(TS_array == -1.5)[0]\n        else:\n            # form y = ax + b\n            # to see what the \"x_axis\" was look at the same function in remote_sensing_core.py\n\n            # denom = (x_axis[right_pointer]-x_axis[left_pointer]).astype('timedelta64[D]')/ \\\n            #                np.timedelta64(int(time_step_size), 'D')\n\n            denom = right_pointer - left_pointer\n            slope = (right_value - left_value) / denom\n\n            # b = right_value - (slope * x_axis[right_pointer])\n            # The thing that matters is the number of steps, not the actual values on the x-axis,\n            # so we use the integer pointer positions instead of timestamps. This avoids\n            # dealing with timestamp arithmetic and day-of-year conversions.\n            b = right_value - (slope * right_pointer)\n            TS_array[left_pointer + 1 : right_pointer] = (\n                slope * np.arange(right_pointer - denom + 1, right_pointer) + b\n            )\n            missing_indicies = np.where(TS_array == -1.5)[0]\n\n    a_regularized_TS[V_idx] = TS_array\n    return a_regularized_TS\n\n\ndef is_leap_year(year):\n    \"\"\"Determine whether a year is a leap year.\"\"\"\n\n    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)\n\n\ndef regularize_a_field(a_df, V_idks=\"NDVI\", interval_size=10, start_year=2008, end_year=2021):\n    \"\"\"Returns a dataframe where data points are interval_size days apart.\n    This function regularizes the data between the minimum and maximum dates\n    present in the data.\n\n    Arguments\n    ---------\n    a_df : dataframe\n        of a given field for only one satellite\n\n    Returns\n    -------\n    regularized_df : dataframe\n    \"\"\"\n    if not (\"human_system_start_time\" in a_df.columns):\n        a_df = add_human_start_time_by_system_start_time(a_df)\n\n    a_df[\"human_system_start_time\"] = pd.to_datetime(a_df[\"human_system_start_time\"])\n    a_df.sort_values(by=\"human_system_start_time\", inplace=True)\n    a_df.reset_index(drop=True, inplace=True)\n\n    assert len(a_df.ID.unique()) == 1\n    # assert (len(a_df.dataset.unique()) == 1)\n    #\n    # see how many days there are between the first and last image\n    #\n    a_df_coverage_days = (\n        
max(a_df.human_system_start_time) - min(a_df.human_system_start_time)\n    ).days\n    assert a_df_coverage_days >= interval_size\n\n    # see how many data points we need in terms of interval_size-day intervals for a_df_coverage_days\n    no_steps = a_df_coverage_days // interval_size\n\n    # initialize output dataframe\n    if \"dataset\" in a_df.columns:\n        regular_cols = [\"ID\", \"dataset\", \"human_system_start_time\", V_idks]\n    else:\n        regular_cols = [\"ID\", \"human_system_start_time\", V_idks]\n\n    regular_df = pd.DataFrame(data=None, index=np.arange(no_steps), columns=regular_cols)\n\n    regular_df[\"ID\"] = a_df.ID.unique()[0]\n    if \"dataset\" in a_df.columns:\n        regular_df[\"dataset\"] = a_df.dataset.unique()[0]\n\n    # the following is an array of time stamps where each entry is the beginning\n    # of the interval_size-day period\n    regular_time_stamps = pd.date_range(\n        min(a_df.human_system_start_time),\n        max(a_df.human_system_start_time),\n        freq=str(interval_size) + \"D\",\n    )\n\n    if len(regular_time_stamps) == no_steps + 1:\n        regular_df.human_system_start_time = regular_time_stamps[:-1]\n    elif len(regular_time_stamps) == no_steps:\n        regular_df.human_system_start_time = regular_time_stamps\n    else:\n        raise ValueError(\n            f\"There is a mismatch between no. days needed and '{interval_size}-day' interval array!\"\n        )\n\n    # Pick the maximum of every interval_size-day window\n    # for row_or_count in np.arange(len(no_steps)-1):\n    #     curr_time_window = a_df[a_df.human_system_start_time >= first_year_steps[row_or_count]]\n    #     curr_time_window = curr_time_window[curr_time_window.doy < first_year_steps[row_or_count+1]]\n\n    #     if len(curr_time_window)==0:\n    #         regular_df.loc[row_or_count, V_idks] = -1.5\n    #     else:\n    #         regular_df.loc[row_or_count, V_idks] = max(curr_time_window[V_idks])\n\n    #     regular_df.loc[row_or_count, 'image_year'] = curr_year\n    #     regular_df.loc[row_or_count, 'doy'] = first_year_steps[row_or_count]\n\n    for start_date in regular_df.human_system_start_time:\n        \"\"\"\n        The following creates an array of interval_size consecutive days\n        starting at start_date; i.e. the days covered by the current interval.\n\n        The right end point is start_date + (interval_size - 1) days, because\n        pd.date_range includes both end points of the range.\n        \"\"\"\n        dateRange = pd.date_range(\n            start_date, start_date + pd.Timedelta(days=interval_size - 1), freq=str(1) + \"D\"\n        )\n        assert len(dateRange) == interval_size\n\n        curr_time_window = a_df[a_df.human_system_start_time.isin(dateRange)]\n        if len(curr_time_window) == 0:\n            regular_df.loc[regular_df.human_system_start_time == start_date, V_idks] = -1.5\n        else:\n            regular_df.loc[regular_df.human_system_start_time == start_date, V_idks] = max(\n                curr_time_window[V_idks]\n            )\n    ##### end the for-loop\n\n    ##\n    ## Some days will be missing from the beginning and end of the whole time series.\n    ##\n    # all_years = np.arange(start_year, end_year+1)\n    # leapyear_count = np.sum([is_leap_year(item) for item in all_years])\n    # total_no_days = (leapyear_count*366) + ((end_year - start_year + 1 - leapyear_count)*365)\n    # total_no_points = total_no_days//interval_size\n    # missing_count = total_no_points - regular_df.shape[0]\n    # missing_from_beginning = (min(regular_df.human_system_start_time) - \\\n    #                           pd.to_datetime(datetime.datetime(start_year, 1, 1, 0, 0))).days // interval_size\n\n    # missing_from_end = missing_count - missing_from_beginning\n    A = pd.date_range(\n        pd.Timestamp(start_year, 1, 1),\n        min(regular_df.human_system_start_time),\n        freq=str(interval_size) + 
\"D\",\n )\n\n missing_begin_df = pd.DataFrame(data=None, index=np.arange(len(A[:-1])), columns=regular_cols)\n\n missing_begin_df.human_system_start_time = A[:-1]\n missing_begin_df.ID = regular_df.ID.unique()[0]\n mm = min(regular_df.human_system_start_time)\n missing_begin_df[V_idks] = np.array(\n regular_df[regular_df.human_system_start_time == mm][V_idks]\n )[0]\n if \"dataset\" in regular_cols:\n missing_begin_df.dataset = regular_df.dataset.unique()[0]\n\n #\n # The tail of the TS\n #\n A = pd.date_range(\n max(regular_df.human_system_start_time),\n pd.Timestamp(end_year, 12, 31),\n freq=str(interval_size) + \"D\",\n )\n\n missing_end_df = pd.DataFrame(data=None, index=np.arange(len(A[1:])), columns=regular_cols)\n missing_end_df.human_system_start_time = A[1:]\n missing_end_df.ID = regular_df.ID.unique()[0]\n mm = max(regular_df.human_system_start_time)\n missing_end_df[V_idks] = np.array(regular_df[regular_df.human_system_start_time == mm][V_idks])[\n 0\n ]\n if \"dataset\" in regular_cols:\n missing_end_df.dataset = regular_df.dataset.unique()[0]\n\n regular_df = pd.concat([missing_begin_df, regular_df, missing_end_df])\n regular_df.reset_index(drop=True, inplace=True)\n return regular_df\n\n\ndef set_negatives_to_zero(df, indeks=\"NDVI\"):\n df.loc[df[indeks] < 0, indeks] = 0\n return df\n\n\ndef clip_outliers(df, idx=\"NDVI\"):\n # dt_copy = df.copy()\n df.loc[df[idx] > 1, idx] = 1\n df.loc[df[idx] < -1, idx] = -1\n return df\n\n\ndef add_human_start_time_by_system_start_time(HDF):\n \"\"\"Returns human readable time (conversion of system_start_time)\n\n Arguments\n ---------\n HDF : dataframe\n\n Returns\n -------\n HDF : dataframe\n the same dataframe with added column of human readable time.\n \"\"\"\n HDF.system_start_time = HDF.system_start_time / 1000\n time_array = HDF[\"system_start_time\"].values.copy()\n human_time_array = [time.strftime(\"%Y-%m-%d\", time.localtime(x)) for x in time_array]\n HDF[\"human_system_start_time\"] = human_time_array\n\n if type(HDF[\"human_system_start_time\"] == str):\n HDF[\"human_system_start_time\"] = pd.to_datetime(HDF[\"human_system_start_time\"])\n\n \"\"\"\n Lets do this to go back to the original number:\n I added this when I was working on Colab on March 30, 2022.\n Keep an eye on it and see if we have ever used \"system_start_time\"\n again. If we do, how we use it; i.e. 
do we need to get rid of the \n    following line or not.\n    \"\"\"\n    HDF.system_start_time = HDF.system_start_time * 1000\n    return HDF\n\n\ndef DTW_prune(ts1, ts2):\n    d, _ = dtw.warping_paths(ts1, ts2, window=10, use_pruning=True)\n    return d\n\n\n# We need this for DL\n# load and prepare the image\ndef load_image(filename):\n    img = load_img(filename, target_size=(224, 224))  # load the image\n    img = img_to_array(img)  # convert to array\n    img = img.reshape(1, 224, 224, 3)  # reshape into a single sample with 3 channels\n    img = img.astype(\"float32\")  # convert to float32\n    img = img - [123.68, 116.779, 103.939]  # center pixel data (subtract channel means)\n    return img\n","sub_path":"NASA/Python_codes/NASA_core.py","file_name":"NASA_core.py","file_ext":"py","file_size_in_byte":38663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648092858","text":"import numpy as np\nimport scipy\n#import matcompat\nfrom consts import EPS\n\n\ndef q_resamp(spk, nti, nto, q):\n\n    # Local Variables: rho, iu, spk, id, trials, pd, memory, ns, pn, spks, L, nti, nto, idx2, idx1, a, i, ml, k, m, q, p, t, z\n    # Function calls: rand, q_resamp, max, sum, eps, pqmargs, zeros, squeeze, size\n    #%it resamples the given experiment keeping\n    #%statistical structure up to order q\n    #%nti: vector with the number of trials per stimulus in the input experiment\n    #%nto: vector with the number of trials per stimulus in the output resampled experiment\n    #%ntr=size(spk,3);\n    ns = spk.shape[4-1] #matcompat.size(spk, 4.)\n    L = spk.shape[2-1] #matcompat.size(spk, 2.)\n    m = max(nto)\n    spks = np.zeros([1, L, m, ns], int)\n    for t in range(1, (ns)+1):\n        #%over all stimulus conditions\n\n        #%over all stimulus conditions\n        trials = np.squeeze(spk[0,:,0:nti[int(t)-1],int(t)-1]).conj().T\n        #%' \n        #%trials set for current stimulus condition\n        if L == 1:\n            trials = trials.conj().T\n            #%'\n        \n        \n        [pn, pd] = pqmargs(trials, L, q)\n        for k in np.arange(1, (nto[int(t)-1])+1):\n            rho = np.zeros([1, L], int)\n            #%now generate the samples\n            #%probability of getting a 1 in bin 1 is 1-pn(1,1)\n            z = np.random.rand(1, 1)\n            rho[0] = z<1.-pn[0,0]\n            for i in np.arange(2., (L)+1):\n                iu = i-1\n                id = i-q\n                if id<1:\n                    id = 1.\n                \n                \n                memory = rho[int(id)-1:iu]\n                ml = iu-id+1.\n                #idx1 = np.sum((matixpower(2., np.array(np.hstack((np.arange(0., (ml-1.)+1)))))*memory))+1.\n                idx1 = np.sum(((2 ** np.arange(0, ml-1+1))*memory))+1\n                idx2 = idx1+2**ml\n                #%this is the index to the probability of getting a 1 in bin i given the memory\n                p = pn[int(i)-1,int(idx2)-1] / (pd[int(i)-1,int(idx1)-1]+EPS)\n                z = np.random.rand(1, 1)\n                rho[int(i)-1] = z',\n        'js': '',\n    }\n)","sub_path":"django_webpack/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"430638150","text":"class Empleado(object):\n\tLength=0\n\tdef __init__(self,nombre,apellidos,cedula,cargo,departamento,escolaridad,salario,rfc,telefono,segSocial,tipoContrato):\n\t\tself.nombre= nombre\n\t\tself.apellidos= apellidos\n\t\tself.cedula = cedula\n\t\tself.cargo= cargo\n\t\tself.departamento= departamento\n\t\tself.escolaridad= escolaridad\n\t\tself.salario= salario\n\t\tself.rfc= rfc\n\t\tself.telefono= telefono\n\t\tself.segSocial= segSocial\n\t\tself.tipoContrato= tipoContrato\n","sub_path":"clase 4 CGC/Empleado.py","file_name":"Empleado.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"137490371","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom django.http import HttpResponse\nfrom .models import Course, Contact\nfrom django.template import loader\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib import messages\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.db.models import Q\n\n\ndef Courses(request):\n std = Course.objects.all()\n template = loader.get_template('Classmate/Course.html')\n context = {\n 'std': std,\n }\n\n return HttpResponse(template.render(context, request))\n\n\ndef logout_request(request):\n logout(request)\n messages.info(request, \"Logged out Successfully!\")\n return redirect('Classmate:home_page')\n\n\ndef login_request(request):\n if request.method == \"POST\":\n form = AuthenticationForm(request, data=request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('username')\n password = form.cleaned_data.get('password')\n user = authenticate(username=username, password=password)\n if user is not None:\n login(request, user)\n messages.info(request, f\"You are logged in as {username}\")\n return redirect(\"Classmate:home_page\")\n else:\n messages.error(request, f\"Invalid username or password\")\n else:\n messages.error(request, f\"Invalid username or password\")\n\n form = AuthenticationForm()\n return render(request=request, template_name='Classmate/login.html', context={'form': form})\n\n\ndef contact(request):\n if request.method == \"POST\":\n name = (request.POST.get('name', ''))\n email = (request.POST.get('email', ''))\n phone = (request.POST.get('phone', ''))\n desc = (request.POST.get('desc', ''))\n print(name, email, phone, desc)\n contacts = Contact(name=name, email=email, phone=phone, desc=desc)\n contacts.save()\n return render(request, 'Classmate/contact.html')\n\n\n\ndef register(request):\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f\"New Account created: {username}\")\n login(request, user)\n messages.info(request, f\"You are now logged in as {username}\")\n return redirect(\"Classmate:home_page\")\n else:\n for msg in form.error_messages:\n messages.error(request, f\"{msg}: {form.error_messages[msg]}\")\n return render(request=request, template_name=\"Classmate/register.html\", context={'form': form})\n\n form = UserCreationForm\n return render(request=request, template_name=\"Classmate/register.html\", context={'form': form})\n\n\ndef about(request):\n return render(request, 'Classmate/about.html')\n\n\ndef home(request):\n return render(request, 'Classmate/Course.html')\n\n\ndef blog(request):\n return render(request, 'Classmate/blog.html')\n\ndef Details(request, course_id):\n course = get_object_or_404(Course, pk=course_id)\n return render(request, 'Classmate/Details.html', {'course': course})\n\n\ndef yourchoice(request, course_id):\n course = get_object_or_404(Course, pk=course_id)\n try:\n selected_Course_type = course.details_set.get(Course_type=request.POST['choice'])\n except (KeyError, Course.DoesNotExist):\n return render(request, 'Classmate/Details.html', {'course': course, 'error_message': \"Select a valid option\"})\n else:\n selected_Course_type.your_choice = True\n selected_Course_type.save()\n return render(request, 'Classmate/description.html', {'course': course})\n\n\ndef description(request, course_id):\n course = 
get_object_or_404(Course, pk=course_id)\n    return render(request, 'Classmate/description.html', {'course': course})\n\n\ndef search(request):\n    if request.method == 'GET':\n        query = request.GET.get('q')\n        submitbutton = request.GET.get('submit')\n        if query is not None:\n            lookups = Q(Course_name__icontains=query) | Q(Courses_id__icontains=query) | Q(description__icontains=query)\n            results = Course.objects.filter(lookups).distinct()\n            context = {\n                'results': results,\n                'submitbutton': submitbutton\n            }\n            return render(request, 'Classmate/search.html', context)\n        else:\n            return render(request, 'Classmate/search.html')\n    else:\n        return render(request, 'Classmate/search.html')\n","sub_path":"Website/Classmate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"141542769","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport sys\nimport random\nimport hashlib\n\n\nclass Sketch:\n    def __init__(self):\n        self.arr = np.zeros((5, 256), dtype=int)\n        self.n = 0\n\n    def inc(self, x, s):\n        self.n += 1\n        m = hashlib.md5()\n        m.update(str.encode(str(x)))\n        m.update(str.encode(str(s)))\n        hashes = m.digest()\n        for i in range(5):\n            self.arr[i, int(hashes[i])] += 1\n\n    def cons_inc(self, x, s):\n        self.n += 1\n        m = hashlib.md5()\n        m.update(str.encode(str(x)))\n        m.update(str.encode(str(s)))\n        hashes = m.digest()\n        min_count = sys.maxsize\n        for i in range(5):\n            min_count = min(min_count, self.arr[i, int(hashes[i])])\n        for i in range(5):\n            if self.arr[i, int(hashes[i])] == min_count:\n                self.arr[i, int(hashes[i])] += 1\n\n    def count(self, x, s):\n        m = hashlib.md5()\n        m.update(str.encode(str(x)))\n        m.update(str.encode(str(s)))\n        hashes = m.digest()\n        count = sys.maxsize\n        for i in range(5):\n            count = min(self.arr[i, int(hashes[i])], count)\n        return count\n\n    def clear(self):\n        self.arr.fill(0)\n        self.n = 0\n\n\n# Generate data streams\nstream = []\nfor i in range(1, 10):\n    for j in range(1000 * i + 1, 1000 * (i + 1) + 1):\n        for _ in range(i):\n            stream.append(j)\n\nfor i in range(1, 61):\n    for _ in range(i):\n        for _ in range(i):\n            stream.append(i)\n\n\n# Machinery to run a stream and test it\ndef feed_stream(sketch, stream, s):\n    hitters = set()\n    for data in stream:\n        sketch.inc(data, s)\n        count = sketch.count(data, s)\n        if count > len(stream) / 100:\n            hitters.add(data)\n    return len(hitters)\n\n\ndef test_stream(stream):\n    sketch = Sketch()\n    sixty_freq = 0\n    one_freq = 0\n    num_hitters = 0\n    for i in range(20):\n        print(\"Feeding stream {0}\".format(i))\n        num_hitters += feed_stream(sketch, stream, i)\n        sixty_freq += sketch.count(60, i)\n        one_freq += sketch.count(1, i)\n        sketch = Sketch()\n    print(\"Sixty freq estimate: {0}\".format(sixty_freq // 20))\n    print(\"One freq estimate: {0}\".format(one_freq // 20))\n    print(\"Number of heavy hitters: {0}\".format(num_hitters // 20))\n\n\nstream.sort()\ntest_stream(stream)\n\nstream.sort(reverse=True)\ntest_stream(stream)\n\nrandom.shuffle(stream)\ntest_stream(stream)\n","sub_path":"hw1/p2/countmin.py","file_name":"countmin.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"620806130","text":"from database import db_connect\nfrom models import *\nimport os\nimport codecs\nimport re\nfrom datetime import datetime\nfrom pathlib import Path\n\n\ndef chat_worker(kargs):\n    obj = ChatWorker(read_source_id=kargs['read_source_id'], exp=kargs['exp'],\n                     
chats_path=kargs['chats_path'], att_path=kargs['att_path'])\n    obj.run(kargs['filename'])\n\n\nclass ChatWorker:\n\n    def __init__(self, read_source_id, exp, chats_path, att_path):\n        self.read_source_id = read_source_id\n        self.exp = exp\n        self.chats_path = chats_path\n        self.att_path = att_path\n\n    def add(self, obj):\n        obj.read_source_id = self.read_source_id\n        self.db_session.add(obj)\n\n    def commit(self):\n        self.db_session.commit()\n\n    def add_participant(self, identifier, name):\n        participant = self.db_session.query(Participant).filter(\n            Participant.identifier == identifier, Participant.name == name).first()\n        if not participant:\n            participant = Participant()\n            participant.identifier = identifier\n            participant.name = name\n            self.add(participant)\n            self.commit()\n        return participant\n\n    def getChatsFilename(self):\n        return os.listdir(self.chats_path)\n\n    def line2message(self, message):\n        result = re.search(self.exp[0], message, flags=re.DOTALL)\n        if result:\n            timestamp = result.group('timestamp')\n            from_ = result.group('from')\n            body = result.group('body')\n            return {\n                'timestamp': timestamp.strip() if timestamp else '',\n                'from': from_.strip() if from_ else '',\n                'body': body.strip() if body else ''\n            }\n\n    def run(self, filename):\n        self.engine, self.db_session = db_connect()\n        self.read_source = self.db_session.query(\n            ReadSource).get(self.read_source_id)\n        with codecs.open(os.path.join(self.chats_path, filename), 'r', 'utf-8') as f:\n            text = f.read()\n\n        chat = Chat()\n        chat.name = os.path.basename(filename)\n        chat.source = self.read_source.chat_source\n        chat.deleted_state = \"Intact\"\n        self.add(chat)\n\n        message_text = re.sub(self.exp[1], r'\\1', text)\n        splitted_text = message_text.split('')[1:]\n        for msg_raw in splitted_text:\n            result = self.line2message(msg_raw)\n            if not result:\n                continue\n            msg = Message()\n            if result['from']:\n                p = self.add_participant(\n                    result['from'], result['from'])  # participant\n                msg.from_ = p\n                if not p in chat.participants:\n                    chat.participants.append(p)\n            msg.body = result['body']\n            msg.deleted_state = \"Intact\"\n            attachment_regex = r'(.*\\..{3,4}\\s+){1}(?:\\(.*\\)){1}'\n            regex_test = re.search(attachment_regex, msg.body)\n            if regex_test:\n                attachment_regex_valitade = r'(.*\\..{3,4}\\s+)'\n                regex_test = re.search(attachment_regex, msg.body).groups()\n                validate = re.search(\n                    attachment_regex_valitade, regex_test[0])\n                if validate:\n                    validate = validate.string\n                    validate = validate.strip()\n                    validate = validate[1:]\n                    validate = os.path.join(self.att_path, validate)\n                    if os.path.exists(validate):\n                        attachment = File()\n                        attachment.extracted_path = str(Path(validate).relative_to(\n                            self.read_source.folder)) if validate else None\n                        attachment.filename = os.path.basename(\n                            attachment.extracted_path)\n                        attachment.size = os.path.getsize(validate)\n                        self.add(attachment)\n                        msg.attachments.append(attachment)\n\n            if result['timestamp']:\n                try:\n                    date = datetime.strptime(\n                        result['timestamp'], self.exp[2])\n                    msg.timestamp = date\n                except Exception as e:\n                    print(e)\n            self.add(msg)\n            chat.messages.append(msg)\n        self.add(chat)\n        self.commit()\n        self.engine.dispose()\n","sub_path":"parsers/spi2db/workers.py","file_name":"workers.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"350860471","text":"# Create a function that takes a list as a parameter,\n# and returns a new list with every odd number from the original list\n# example: [1, 2, 3, 4, 5] should produce [1, 
3, 5]\nlist_to_be_filtered = [1, 2, 3, 4, 5]\n\ndef find_odds(numbers):\n if not numbers:\n return []\n if numbers[0] % 2 == 1:\n return [numbers[0]] + find_odds(numbers[1:])\n return find_odds(numbers[1:])\n\nprint(find_odds(list_to_be_filtered))","sub_path":"oddfilter/odd_filter.py","file_name":"odd_filter.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"540941006","text":"from discord.ext import commands\nimport logging\nfrom utils import Config, permission_node\nfrom .utils import isUp, sendCmd\n\nlog = logging.getLogger('charfred')\n\n\nclass ConsoleCmds(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.loop = bot.loop\n self.servercfg = bot.servercfg\n if 'whitelistcategories' not in self.servercfg:\n self.servercfg['whitelistcategories'] = {}\n if 'defaultcategory' not in self.servercfg:\n self.servercfg['defaultcategory'] = ''\n\n @commands.group(aliases=['mc'], invoke_without_command=True)\n @permission_node(f'{__name__}.whitelist')\n async def minecraft(self, ctx):\n \"\"\"Minecraft server console commands.\"\"\"\n\n pass\n\n @minecraft.group(invoke_without_command=True)\n @permission_node(f'{__name__}.whitelist')\n async def whitelist(self, ctx, player: str, category: str=None):\n \"\"\"Add a player to the whitelist.\n\n Optionally takes a category name, for whitelisting\n given player on servers in that category only.\n If a default category is set, it will be used, instead\n of defaulting to all known servers.\n \"\"\"\n\n log.info('Whitelisting player.')\n if not category:\n category = self.servercfg['defaultcategory']\n if category:\n try:\n servers = self.servercfg['whitelistcategories'][category]\n except KeyError:\n log.warning('Category not found!')\n await ctx.sendmarkdown(f'< {category} does not exist! >')\n return\n else:\n servers = self.servercfg['servers']\n\n msg = ['Command Log', '==========', f'> Category: {category}' if category else '']\n for server in servers:\n if isUp(server):\n log.info(f'Whitelisting {player} on {server}.')\n await sendCmd(self.loop, server, f'whitelist add {player}')\n msg.append(f'# Whitelisted {player} on {server}.')\n else:\n log.warning(f'Could not whitelist {player} on {server}.')\n msg.append(f'< Unable to whitelist {player}, {server} is offline! >')\n await ctx.sendmarkdown('\\n'.join(msg))\n\n @whitelist.command()\n async def remove(self, ctx, player: str, category: str=None):\n \"\"\"Remove a player from the whitelist.\"\"\"\n\n log.info('Unwhitelisting player.')\n if not category:\n category = self.servercfg['defaultcategory']\n if category:\n try:\n servers = self.servercfg['whitelistcategories'][category]\n except KeyError:\n log.warning('Category not found!')\n await ctx.sendmarkdown(f'< {category} does not exist! >')\n return\n else:\n servers = self.servercfg['servers']\n\n msg = ['Command Log', '==========', f'> Category: {category}' if category else '']\n for server in servers:\n if isUp(server):\n log.info(f'Unwhitelisting {player} on {server}.')\n await sendCmd(self.loop, server, f'whitelist remove {player}')\n msg.append(f'# Unwhitelisting {player} on {server}.')\n else:\n log.warning(f'Could not unwhitelist {player} on {server}.')\n msg.append(f'< Unable to unwhitelist {player}, {server} is offline! 
>')\n await ctx.sendmarkdown('\\n'.join(msg))\n\n @whitelist.command()\n async def check(self, ctx, player: str):\n \"\"\"Check if a player is on the whitelist.\"\"\"\n\n msg = ['Command Log', '==========']\n for server in self.servercfg['servers']:\n try:\n with open(\n self.servercfg['serverspath'] + f'/{server}/whitelist.json', 'r'\n ) as whitelist:\n if player in whitelist.read():\n msg.append(f'# {player} is whitelisted on {server}.')\n else:\n msg.append(f'< {player} is NOT whitelisted on {server}. >')\n except FileNotFoundError:\n msg.append(f'< {server} does not have a whitelist.json file! >')\n await ctx.sendmarkdown('\\n'.join(msg))\n\n @whitelist.group(invoke_without_command=True)\n @permission_node(f'{__name__}.categories')\n async def category(self, ctx):\n \"\"\"Whitelist category commands.\n\n Returns a list of all currently defined\n categories, if no subcommand is given.\n \"\"\"\n\n msg = ['Whitelist Categories', '============']\n try:\n for category, servers in self.servercfg['whitelistcategories'].items():\n msg.append(f'# {category}:')\n for server in servers:\n msg.append(f'\\t{server}')\n except KeyError:\n msg.append('> No Categories defined!')\n await ctx.sendmarkdown('\\n'.join(msg))\n\n @category.command()\n async def setdefault(self, ctx, category: str):\n \"\"\"Sets a defined category to be the default for whitelisting.\n\n If a default is set, whitelist commands will use it instead\n of defaulting to all known servers.\n \"\"\"\n\n if category not in self.servercfg['whitelistcategories']:\n log.warning('Category not found!')\n await ctx.sendmarkdown(f'< {category} does not exist! >')\n else:\n self.servercfg['defaultcategory'] = category\n log.info('Set default whitelisting category!')\n await self.servercfg.save()\n await ctx.sendmarkdown(f'# {category} set as default whitelisting category.')\n\n @category.command()\n async def add(self, ctx, category: str, *servers):\n \"\"\"Add a new whitelist category.\n\n Takes a name for the new category and the names\n of all servers that should be a part of it.\n If category exists, given servers will be added to it.\n \"\"\"\n\n if category not in self.servercfg['whitelistcategories']:\n log.info('Adding new whitelist category.')\n self.servercfg['whitelistcategories'][category] = []\n if servers:\n for server in servers:\n log.info(f'Added {server} to {category}.')\n self.servercfg['whitelistcategories'][category].append(server)\n await self.servercfg.save()\n await ctx.sendmarkdown(f'Done!')\n\n @category.command(name='remove')\n async def _remove(self, ctx, category: str, server: str=None):\n \"\"\"Removes a whitelist category or a given server from a category.\"\"\"\n\n if server:\n log.info(f'Removing {server} from {category}.')\n try:\n self.servercfg['whitelistcategories'][category].remove(server)\n except KeyError:\n log.warning('Category not found!')\n await ctx.sendmarkdown(f'< {category} does not exist! >')\n except ValueError:\n log.warning('Server not found!')\n await ctx.sendmarkdown(f'> {server} is not in {category}!')\n else:\n await ctx.sendmarkdown(f'# {server} removed from {category}.')\n finally:\n return\n log.info(f'Removing {category}.')\n try:\n del self.servercfg['whitelistcategories'][category]\n except KeyError:\n log.warning('Category not found!')\n await ctx.sendmarkdown(f'< {category} does not exist! 
>')\n else:\n await ctx.sendmarkdown(f'# {category} removed!')\n await self.servercfg.save()\n\n @minecraft.command()\n @permission_node(f'{__name__}.kick')\n async def kick(self, ctx, server: str, player: str):\n \"\"\"Kick a player from a specified server.\n\n Takes a servername and playername, in that order.\n \"\"\"\n\n msg = ['Command Log', '==========']\n if isUp(server):\n log.info(f'Kicking {player} from {server}.')\n await sendCmd(self.loop, server, f'kick {player}')\n msg.append(f'# Kicked {player} from {server}.')\n else:\n msg.append(f'< {server} is not online! >')\n await ctx.sendmarkdown('\\n'.join(msg))\n\n @minecraft.command()\n @permission_node(f'{__name__}.ban')\n async def ban(self, ctx, player: str):\n \"\"\"Bans a player, and unwhitelists just to be safe.\"\"\"\n\n msg = ['Command Log', '==========']\n for server in self.servercfg['servers']:\n if isUp(server):\n log.info(f'Banning {player} on {server}.')\n await sendCmd(self.loop, server, f'ban {player}')\n log.info(f'Unwhitelisting {player} on {server}.')\n await sendCmd(self.loop, server, f'whitelist remove {player}')\n msg.append(f'# Banned {player} from {server}.')\n else:\n log.warning(f'Could not ban {player} from {server}.')\n msg.append(f'< Unable to ban {player}, {server} is offline! >')\n await ctx.sendmarkdown('\\n'.join(msg))\n\n @minecraft.command(aliases=['pass'])\n @permission_node(f'{__name__}.relay')\n async def relay(self, ctx, server: str, *, command: str):\n \"\"\"Relays a command to a servers\\' console.\n\n Takes a servername and a command, in that order.\n \"\"\"\n\n msg = ['Command Log', '==========']\n if isUp(server):\n log.info(f'Relaying \\\"{command}\\\" to {server}.')\n await sendCmd(self.loop, server, command)\n msg.append(f'# Relayed \\\"{command}\\\" to {server}.')\n else:\n log.warning(f'Could not relay \\\"{command}\\\" to {server}.')\n msg.append(f'< Unable to relay command, {server} is offline! >')\n await ctx.sendmarkdown('\\n'.join(msg))\n\n\ndef setup(bot):\n if not hasattr(bot, 'servercfg'):\n default = {\n \"servers\": {}, \"serverspath\": \"NONE\", \"backupspath\": \"NONE\", \"oldTimer\": 1440\n }\n bot.servercfg = Config(f'{bot.dir}/configs/serverCfgs.toml',\n default=default,\n load=True, loop=bot.loop)\n permission_nodes = ['whitelist', 'categories', 'kick', 'ban', 'relay']\n bot.register_nodes([f'{__name__}.{node}' for node in permission_nodes])\n bot.add_cog(ConsoleCmds(bot))\n","sub_path":"minecraftcogs/consolecmds.py","file_name":"consolecmds.py","file_ext":"py","file_size_in_byte":10304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"203890159","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import ValidationError\nfrom openerp.osv.expression import get_unaccent_wrapper\n\n\nclass res_partner(models.Model):\n    \n    _inherit = 'res.partner'\n    \n    id = fields.Integer(string='ID', readonly=True)\n    name_en = fields.Char(string='Name (en)')\n    create_date = fields.Datetime(readonly=True)\n    is_government = fields.Boolean(string='Government', compute='_is_government') \n    vat = fields.Char(size=13)\n    taxbranch = fields.Char(string='Tax Branch ID', size=5)\n    search_key = fields.Char(string='Search Key', compute='_get_search_key', store=True)\n    sap_code = fields.Char(string='SAP customer code', size=20)\n    # For group-by only.\n    single_category_id = fields.Many2one('res.partner.category', compute='_get_single_category_id', store=True)\n    require_taxid = fields.Boolean(string='Require Tax ID', compute='_get_require_taxbranch', store=True, multi='taxbranch')\n    require_taxbranch = fields.Boolean(string='Require Tax Branch ID', compute='_get_require_taxbranch', store=True, multi='taxbranch')\n\n    @api.one\n    @api.constrains('name', 'supplier', 'customer')\n    def _check_partner_name(self):\n        partner_ids = self.search([('name', '=', self.name),\n                                   '|', ('supplier', '=', True),\n                                   ('customer', '=', True)])\n        if len(partner_ids) > 1:\n            raise ValidationError(\"Partner Name must be unique!\")\n\n    @api.one\n    @api.constrains('vat', 'taxbranch', 'category_id')\n    def _check_vat_taxbranch_unique(self):\n        if not self.is_government and self.category_id.require_tax_branch_unique:\n            partners = self.search([('vat','=',self.vat),('taxbranch','=',self.taxbranch)])\n            if len(partners) > 1:\n                raise ValidationError(_(\"Tax ID + Tax Branch ID must be unique for non-governmental organization!\"))\n    \n    @api.one\n    @api.constrains('vat')\n    def _check_vat(self):\n        if self.require_taxid and self.vat > 0 and len(self.vat) != 13:\n            raise ValidationError(_(\"Tax ID must be 13 digits!\"))\n    \n    @api.one\n    @api.constrains('taxbranch')\n    def _check_taxbranch(self):\n        if self.require_taxbranch and self.taxbranch > 0 and len(self.taxbranch) != 5:\n            raise ValidationError(\"Tax Branch ID must be 5 digits!\")\n    \n    @api.one\n    @api.constrains('is_company', 'parent_id', 'child_ids')\n    def _check_is_company(self):\n        if not self.is_company and self.child_ids:\n            raise ValidationError(_(\"A contact must not have child companies\"))\n    \n    @api.model\n    def create(self, vals):\n        partner = super(res_partner, self).create(vals)\n        # Always use same tag as parent.\n        if vals.get('parent_id', False):\n            partner.category_id = partner.parent_id.category_id\n        return partner\n    \n    @api.model\n    def _pre_category_change(self, vals):\n        # Do not allow change of partner tag, if it results in a change of its accounting\n        check = self.env['ir.config_parameter'].get_param('res_partner_ext.no_partner_tag_change_account')\n        check = check and check.lower() or 'false'\n        if check == 'true' and vals.get('category_id', False):\n            category = vals.get('category_id')  # Test whether index exists to prevent exception\n            index_exists = len(category) > 0 and \\\n                len(category[0]) > 2 and \\\n                len(category[0][2]) > 0 or False\n            if not index_exists:\n                return\n            for partner in self:\n                prev_categ = partner.category_id\n                new_category_id = category[0][2][0]\n                new_categ = self.env['res.partner.category'].browse(new_category_id)\n                if prev_categ and \\\n                    (prev_categ.receivable_account_id != new_categ.receivable_account_id or \\\n                     
prev_categ.payable_account_id != new_categ.payable_account_id):\n                    raise ValidationError(_(\"Changing of Partner Tag is not allowed, \"\n                                            \"as it will result in changing of its account code\")) \n\n    @api.model\n    def _post_category_change(self, vals):\n        # Parent's tag changed; force the change to all children\n        if vals.get('category_id', False):\n            for partner in self:\n                if partner.child_ids:\n                    for child in partner.child_ids:\n                        child.category_id = partner.category_id\n\n#     @api.model\n#     def _update_and_check_taxid(self, vals):\n#         # update tax to parent's\n#         for partner in self:\n#             if not partner.parent_id:\n#                 continue\n#             elif not partner.vat:  # No VAT, set as parent\n#                 partner.vat = partner.parent_id.vat\n#             elif vals.get('vat', False) and \\\n#                     vals.get('vat', False) != partner.parent_id.vat:\n#                 raise ValidationError(_(\n#                     \"\"\"Tax ID not match with parent company.\n#                     You can leave Tax ID blank to use parent company Tax ID\"\"\"\n#                 ))\n\n    @api.multi\n    def write(self, vals):\n        self._pre_category_change(vals)\n        res = super(res_partner, self).write(vals)\n        self._post_category_change(vals)\n        # self._update_and_check_taxid(vals)\n        return res\n\n    @api.v7\n    def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):\n        result = super(res_partner, self).onchange_address(cr, uid, ids, use_parent_address, parent_id, context=context)\n        parent = self.browse(cr, uid, parent_id, context=context)\n        category_id = parent.category_id and parent.category_id[0].id or False\n        if category_id: \n            if result.get('value', False):\n                result['value'].update({'category_id': [(6, 0, [category_id])]})\n            else:\n                result.update({'value': {'category_id': [(6, 0, [category_id])]}})\n        return result\n\n    @api.one\n    @api.depends('category_id')\n    def _get_single_category_id(self):\n        if self.category_id:\n            self.single_category_id = self.category_id[0].id\n    \n    @api.one\n    @api.depends('category_id', 'parent_id')\n    def _get_require_taxbranch(self):\n        if self.parent_id:  # If a contact, never set as required.\n            self.require_taxid = False\n            self.require_taxbranch = False\n        elif self.single_category_id:\n            self.require_taxid = self.single_category_id.require_taxid\n            self.require_taxbranch = self.single_category_id.require_taxbranch\n    \n    @api.one\n    @api.depends('category_id')\n    def _is_government(self):\n        if self.category_id and self.category_id[0].parent_id:\n            gov_categ_id = self.env['ir.model.data'].get_object_reference('nstda_msd', 'partner_tag_government')[1]\n            if self.category_id[0].parent_id.id == gov_categ_id:\n                self.is_government = True\n            else:\n                self.is_government = False\n    \n    @api.one\n    @api.depends('name')\n    def _get_search_key(self):\n        if type(self.id) in (int,):\n            self.search_key = '%0*d' % (7, self.id)\n    \n    @api.constrains('category_id')\n    def _check_one_partner_category(self):\n        if len(self.category_id) > 1:\n            raise ValidationError(_('Please select only 1 partner category'))\n    \n    @api.onchange('category_id')\n    def _onchange_category_id(self):\n        # Use partner category's account when available\n        if self.category_id:\n            if self.category_id[0].receivable_account_id:\n                self.property_account_receivable = self.category_id[0].receivable_account_id.id\n            if self.category_id[0].payable_account_id:\n                self.property_account_payable = self.category_id[0].payable_account_id.id\n            if self.category_id[0].fiscal_position_id:\n                self.property_account_position = self.category_id[0].fiscal_position_id.id\n        else:\n            self.property_account_receivable = False\n            self.property_account_payable = False\n            self.property_account_position = 
self.property_account_receivable = False\n self.property_account_payable = False\n self.property_account_position = False\n\n def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):\n if not args:\n args = []\n # Escape a lone single quote and strip brackets so the term cannot break the SQL literal below.\n if name == \"'\":\n name = \"''\"\n name = name.replace('[','')\n name = name.replace('(','')\n if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):\n self.check_access_rights(cr, uid, 'read')\n where_query = self._where_calc(cr, uid, args, context=context)\n self._apply_ir_rules(cr, uid, where_query, 'read', context=context)\n from_clause, where_clause, where_clause_params = where_query.get_sql()\n where_str = where_clause and (\" WHERE %s AND \" % where_clause) or ' WHERE '\n\n # search on the name of the contacts and of its company\n search_name = name\n if operator in ('ilike', 'like'):\n search_name = '%%%s%%' % name\n if operator in ('=ilike', '=like'):\n operator = operator[1:]\n\n unaccent = get_unaccent_wrapper(cr)\n\n query = \"\"\"SELECT id\n FROM res_partner\n {where} ({email} {operator} '{percent}'\n OR {display_name} {operator} '{percent}'\n -- nstda: added search\n OR {name_en} {operator} '{percent}'\n OR {search_key} {operator} '{percent}'\n OR {sap_code} {operator} '{percent}'\n OR {vat} {operator} '{percent}'\n OR {taxbranch} {operator} '{percent}'\n -- nstda: special search, combination of display_name and taxbranch\n OR (lower({display_name}) SIMILAR TO lower('%%({percent})%%') \n AND lower({taxbranch}) SIMILAR TO lower('%%({percent})%%')))\n --\n ORDER BY {display_name}\n \"\"\".format(where=where_str, operator=operator,\n email=unaccent('email'),\n display_name=unaccent('display_name'),\n # nstda\n name_en=unaccent('name_en'),\n search_key=unaccent('search_key'),\n sap_code=unaccent('sap_code'),\n vat=unaccent('vat'),\n taxbranch=unaccent('taxbranch'),\n # --\n percent=unaccent('%s'))\n where_clause_params += [search_name, search_name, search_name, search_name, search_name, search_name, search_name, search_name.replace(' ','|'), search_name.replace(' ','|')]\n if limit:\n query += ' limit %s'\n where_clause_params.append(limit)\n\n cr.execute(query % tuple(where_clause_params))\n ids = map(lambda x: x[0], cr.fetchall())\n if ids:\n return self.name_get(cr, uid, ids, context)\n else:\n return []\n return super(res_partner,self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)\n \n\nclass res_partner_category(models.Model):\n\n _inherit = 'res.partner.category'\n \n parent_id = fields.Many2one('res.partner.category', domain=\"[('parent_id', '=', False)]\")\n payable_account_id = fields.Many2one('account.account', string='Account Payable', domain=\"[('type', '=', 'payable')]\", \n help=\"This account will be used as the default payable account for a partner when it is created.\",)\n receivable_account_id = fields.Many2one('account.account', string='Account Receivable', domain=\"[('type', '=', 'receivable')]\", \n help=\"This account will be used as the default receivable account for a partner when it is created.\",)\n require_taxid = fields.Boolean(string='Require Tax ID', default=False, help=\"When creating a partner in this category, always require a Tax ID\")\n require_taxbranch = fields.Boolean(string='Require Tax Branch ID', default=False, help=\"When creating a partner in this category, always require a Tax Branch ID\")\n fiscal_position_id = fields.Many2one('account.fiscal.position', string='Default Fiscal Position',\n help=\"Customers with this Partner Tag will default to this Fiscal Position\")\n 
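# When set, _check_vat_taxbranch_unique on res.partner enforces Tax ID + Branch uniqueness for this tag.\n 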
require_tax_branch_unique = fields.Boolean(string='Validate Tax/Branch Unique',\n help=\"For non-government partners in this category, checking this flag ensures that the Tax ID + Tax Branch ID combination is unique per company\")\n\nclass res_partner_title(models.Model):\n\n _inherit = 'res.partner.title'\n \n name = fields.Char(translate=False)\n name_en = fields.Char(string='Title (en)')\n shortcut = fields.Char(translate=False)\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"res_partner_ext/partner.py","file_name":"partner.py","file_ext":"py","file_size_in_byte":14230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"598632667","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views import generic\nfrom .serializers import mqttSerializer, gazSerializer, ElectricitySerializer\nfrom rest_framework import generics\nfrom .models import mqtt, gazoline, orbi_tmp, electricity, electrometr, electrozones\nfrom django_filters import rest_framework as filters\nfrom rest_framework import viewsets, status\nfrom rest_framework.filters import SearchFilter, OrderingFilter\nfrom django.shortcuts import render_to_response\nfrom qsstats import QuerySetStats\nfrom datetime import date, datetime, timedelta\nfrom .forms import MyForm, gazForm, orbiForm\nfrom django.http import HttpResponse, JsonResponse\nfrom django.db.models import F, Count, Value, Avg, Sum, Min, Max\nfrom django.views.generic.edit import DeleteView\nfrom django.urls import reverse_lazy\nfrom django.db.models.functions import Extract\nfrom random import randint\nfrom django.views.generic import TemplateView\nfrom chartjs.views.lines import BaseLineChartView\nfrom .script import stock ,orbitrack, orbi_stop\nfrom time import gmtime, strftime\nfrom rest_framework.decorators import api_view\nfrom rest_framework.parsers import JSONParser \nfrom django.utils import timezone # Django's timezone helper; datetime.timezone is not used in this module\nfrom django.contrib.auth.models import User\nimport pytz\nimport csv\nimport numpy as np\nimport logging\nfrom .constants import *\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n\n\n\n\nclass GazListjson(generics.ListAPIView):\n queryset=gazoline.objects.all().filter(fuel_type = 'LPG')\n serializer_class = gazSerializer\n\n\nclass mqttList(generics.ListAPIView):\n queryset = mqtt.objects.all()\n serializer_class = mqttSerializer\n\n\nclass mqttDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = mqtt.objects.all()\n serializer_class = mqttSerializer\n\n\nclass mqttViewSet(viewsets.ModelViewSet):\n __basic_fields = ('topic','payload','created_date',)\n #queryset = mqtt.objects.all().filter(topic = \"temp\")\n queryset = mqtt.objects.all()\n serializer_class = mqttSerializer\n filter_backends = (filters.DjangoFilterBackend, SearchFilter, OrderingFilter)\n filter_fields = __basic_fields\n search_fields = __basic_fields\n\n\n\nclass gazListView(generic.ListView):\n \"\"\"Generic class-based list view for gazoline records.\"\"\"\n model = gazoline\n fields = '__all__'\n # paginate_by = 3\n\n\ndef graph_1(request):\n return render(request, 'graph1.html')\n\n\ndef publish_1(request):\n return render(request, 'publish.html')\n\n@login_required\ndef select(request):\n # [:] evaluates the queryset; a slice step of 0 would raise ValueError.\n queryset = mqtt.objects.all().filter(topic = \"temperature\")[:]\n #queryset = mqtt.objects.get(Q(topic = \"home/bath/esp1/humidity\"))\n return render(request, \"select.html\", {\"mqtt\": queryset})\n\n@login_required\ndef search(request):\n 
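# Render the bare search form; search_result() below runs the actual query.\n 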
return render_to_response('search.html')\n\n@login_required\ndef search_result(request):\n if 'q' in request.GET and request.GET['q']:\n q = request.GET['q']\n\n r = 1 # default: skip no rows when 'r' is absent\n if 'r' in request.GET and request.GET['r']:\n r = int(request.GET['r'])\n\n queryset = mqtt.objects.filter(topic = q)[::r]\n return render_to_response('search_result.html',\n {'mqtt': queryset, 'query': q, 'skipped_rows': r})\n else:\n return HttpResponse('Please submit a search term.')\n\n@login_required\ndef publish(request):\n if request.method == \"POST\":\n # [:] evaluates the querysets; a slice step of 0 would raise ValueError.\n queryset_temp = mqtt.objects.all().filter(topic = \"temperature\")[:]\n queryset_hum = mqtt.objects.all().filter(topic = \"humidity\")[:]\n date = request.POST['datepicker']\n return render(request, \"line_chart.html\", {\"my_date\": date, \"my_data_temp\": queryset_temp, \"my_data_hum\": queryset_hum})\n else:\n form = MyForm()\n return render(request, 'calendar_2.html', {'form': form})\n\n@login_required\ndef view_func_iii(request):\n start_date = date(2016, 3, 12)\n end_date = date(2016, 3, 16)\n queryset = mqtt.objects.all()\n qsstats = QuerySetStats(queryset, date_field='created_date')\n values = qsstats.time_series(start_date, end_date, interval='days', aggregate=Count('created_date'))\n summary = qsstats.time_series(start_date, end_date, aggregate=Count('payload'))\n return render_to_response('template_2.html', {'values': values, 'summary': summary})\n\n@login_required\ndef view_func(request):\n queryset_temp = mqtt.objects.all().filter(topic = \"home/poliv/temp\")[::]\n queryset_hum = mqtt.objects.all().filter(topic = \"home/poliv/water\").order_by('-created_date')\n return render(request, \"line_chart.html\", {\"my_data_temp\": queryset_temp, \"my_data_hum\": queryset_hum})\n\n@login_required\ndef google_graphs(request):\n return render_to_response('graph1.html')\n@login_required\n#@csrf_exempt\ndef google_rest_int(request):\n \"\"\"\n Return the mqtt temperature readings with payload <= 22.5 as JSON.\n \"\"\"\n if request.method == 'GET':\n mqtt_data = mqtt.objects.all().filter(payload__lte = 22.5).filter(topic = \"home/poliv/temp\")\n serializer = mqttSerializer(mqtt_data, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n\n@login_required\ndef google_rest(request):\n return render_to_response('test2020.html')\n\n\ndef indicator(request):\n return render_to_response('indicator_2.html')\n\n\ndef indicator_mqtt(request):\n orbitrack.main()\n return render_to_response('indicator_mqtt.html')\n\ndef orbiStop(request):\n orbi_stop.main()\n return render_to_response('index_1.html')\n\n\n\nclass gazDetailView(generic.DetailView):\n \"\"\"Generic class-based detail view for a gazoline record.\"\"\"\n model = gazoline\n\n\n@login_required\ndef gaz_add(request):\n if request.method == \"POST\":\n form = gazForm(request.POST, request.FILES)\n if form.is_valid():\n boards = form.save(commit=False)\n #modules.name = form['name'].value()\n boards.save()\n return redirect('gaz_list')\n else:\n form = gazForm()\n return render(request, 'myhome_1/gazoline_add.html', {'form': form})\n\n\n@login_required\ndef gaz_template(request):\n # Fall back to the 2012-2013 range when the GET parameters are missing.\n start_date = int(request.GET.get('year_1', 2012))\n end_date = int(request.GET.get('year_2', 2013))\n\n cont = gazoline.agregates(gazoline, start_date, end_date)\n\n values_count = [t[\"query_Count\"] for t in cont]\n values_dat = [t[\"start_date\"][:4] for t in cont]\n values_sum = [t[\"query_Sum\"] for t in cont]\n values_avg = [t[\"query_Avg\"] for t in cont]\n values_liters = [t[\"query_Liters\"] for t in cont]\n values_rashod = 
[t[\"rashod\"] for t in cont]\n values_distance = [t[\"distance\"] for t in cont]\n\n values_count.pop(), values_dat.pop(),values_sum.pop()\n values_avg.pop(), values_liters.pop(), values_distance.pop(), values_rashod.pop()\n\n\n #print(\"disc\", gazoline().disc())\n # print(\"values_dat\", values_dat)\n #captions_a = [t[0].year for t in values_dat]\n\n return render(request, 'template_gaz_func.html', {'cont': cont,\n 'values_dat': values_dat,\n 'values_count': values_count,\n 'values_sum': values_sum,\n 'values_avg': values_avg,\n 'values_liters': values_liters,\n 'values_distance': values_distance,\n 'values_rashod': values_rashod\n })\n\n@login_required\ndef gazoline_edit(request, pk):\n g = get_object_or_404(gazoline, pk=pk)\n if request.method == \"POST\":\n form = gazForm(request.POST, request.FILES, instance=g)\n if form.is_valid():\n form.save()\n return redirect('gazoline_edit_view', pk=pk)\n else:\n form = gazForm(instance=g)\n return render(request, 'myhome_1/gazoline_edit.html', {'form': form})\n\n\n@login_required\ndef gaz_template_month(request):\n start_date = date(int(request.GET['year_1'][0:4]),1,1)\n end_date = date(int(request.GET['year_1'][0:4]),12,31)\n query = gazoline.objects.all()\n qsstats = QuerySetStats(query, date_field='created_date', aggregate=Sum('price_after_disc'))\n values = qsstats.time_series(start_date, end_date, interval='months')\n qsstats = QuerySetStats(query, date_field='created_date', aggregate=Sum('liters'))\n liters = qsstats.time_series(start_date, end_date, interval='months')\n query_1 = query.filter(created_date__year=start_date.year)\n\n return render_to_response( 'template_gaz_google.html', {'query':query_1,'values':values, 'liters':liters})\n\n\n@login_required\ndef gaz_search(request):\n return render_to_response('gaz_search.html')\n\n\n@login_required\ndef gchart(request):\n return render_to_response('gchart_1.html')\n\n\n@login_required\ndef gaz_search_result(request):\n\n start_date = date(int(request.GET['year_1']),1,1)\n end_date = date(int(request.GET['year_2']),12,31)\n query = gazoline.objects.all().filter(created_date__range=(start_date, end_date))\n query_Avg = query.aggregate(Avg('price_liter'))[\"price_liter__avg\"]\n query_Count = query.aggregate(Count('created_date'))[\"created_date__count\"]\n query_Sum = query.aggregate(Sum('price_after_disc'))[\"price_after_disc__sum\"]\n query_Liters = query.aggregate(Sum('liters'))[\"liters__sum\"]\n query_Min = query.aggregate(Min('millage'))[\"millage__min\"]\n query_Max = query.aggregate(Max('millage'))[\"millage__max\"]\n distance = query_Max - query_Min\n qsstats = QuerySetStats(query, date_field='created_date')\n values_dat_m = qsstats.time_series(start_date, end_date, interval='months', aggregate=Sum('price_after_disc'))\n values_a = [t[1] for t in values_dat_m]\n captions_a = [t[0] for t in values_dat_m]\n\n return render(request, 'search_result_gaz.html', {'values': values_dat_m,\n 'query_Avg': query_Avg,\n 'query_Count': query_Count,\n 'query_Sum': query_Sum,\n 'query_Liters': query_Liters,\n 'values_dat': values_dat_m,\n 'values_a': values_a,\n 'captions_a': captions_a,\n 'distance': distance })\n\nclass GazDelete(DeleteView):\n model = gazoline\n # template_name = 'item_confirm_delete.html'\n success_url = reverse_lazy('gaz_list')\n\n\n\nclass LineChartJSONView(BaseLineChartView):\n def get_labels(self):\n \"\"\"Return 7 labels for the x-axis.\"\"\"\n return [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\"]\n\n\n def get_providers(self):\n 
\"\"\"Return names of datasets.\"\"\"\n return [\"Central\", \"Eastside\", \"Westside\"]\n\n\n\n def get_data(self):\n \"\"\"Return 3 datasets to plot.\"\"\"\n return [[75, 44, 92, 11, 44, 95, 35],\n [41, 92, 18, 3, 73, 87, 92],\n [87, 21, 94, 3, 90, 13, 65]]\n\n\n # line_chart = TemplateView.as_view(template_name='line_chart.html')\n# line_chart_json = LineChartJSONView.as_view()\n\n\n@login_required\ndef export_users_csv(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"myhome_1_gazoline.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['id', 'created_date','fuel_type', 'liters','millage','price_after_disc','price_liter','station'])\n\n rows = gazoline.objects.all().values_list('id', 'created_date','fuel_type', 'liters','millage','price_after_disc','price_liter','station')\n for row in rows:\n writer.writerow(row)\n\n return response\n\n@login_required\ndef export_orbitrack_csv(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"myhome_1_orbitrack.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['id', 'distance', 'time', 'speed', 'created'])\n\n rows = orbi_tmp.objects.all().values_list('id', 'distance', 'time', 'speed', 'created')\n for row in rows:\n writer.writerow(row)\n\n return response\n\n\ndef get_success_url(request):\n # Take the request as an argument so the referer can be read from it.\n return request.META.get('HTTP_REFERER')\n\n\ndef get_stock(request):\n stock_info = stock.main()\n return render_to_response( 'stock.html', { 'stock_info': stock_info})\n\n\ndef sec(request):\n seconds = (strftime(\"%H:%M:%S\", gmtime()))\n return HttpResponse(seconds)\n\n\n\nclass orbi_tmpListView(generic.ListView):\n \"\"\"Generic class-based list view for orbitrack records.\"\"\"\n model = orbi_tmp\n fields = '__all__'\n \n \n@login_required\n@api_view(['GET', 'POST', 'DELETE'])\n#@authentication_classes([SessionAuthentication, BasicAuthentication])\n#@permission_classes([IsAuthenticated])\ndef arduino(request):\n\n if request.method == 'GET':\n mqtts = mqtt.objects.all()\n \n \n mqtts_serializer = mqttSerializer(mqtts, many=True)\n return JsonResponse(mqtts_serializer.data, safe=False)\n # 'safe=False' for objects serialization\n \n elif request.method == 'POST':\n mqtt_data = JSONParser().parse(request)\n mqtts_serializer = mqttSerializer(data=mqtt_data)\n if mqtts_serializer.is_valid():\n mqtts_serializer.save(created_date=timezone.now())\n return JsonResponse(mqtts_serializer.data, status=status.HTTP_201_CREATED) \n return JsonResponse(mqtts_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n elif request.method == 'DELETE':\n count = mqtt.objects.all().delete()\n return JsonResponse({'message': '{} records were deleted successfully!'.format(count[0])}, status=status.HTTP_204_NO_CONTENT)\n\n\n \n@api_view(['GET', 'POST', 'DELETE'])\n#@authentication_classes([SessionAuthentication, BasicAuthentication])\n#@permission_classes([IsAuthenticated])\n@login_required\ndef electro(request):\n\n if request.method == 'GET':\n electricitys = electricity.objects.all().filter(param = ('100'))\n \n \n electricity_serializer = ElectricitySerializer(electricitys, many=True)\n return JsonResponse(electricity_serializer.data, safe=False)\n # 'safe=False' for objects serialization\n \n elif request.method == 'POST':\n electricity_data = JSONParser().parse(request)\n electricity_serializer = ElectricitySerializer(data=electricity_data)\n 
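# Validate the parsed payload before persisting; save() stamps it with the server time.\n 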
if electricity_serializer.is_valid():\n electricity_serializer.save(created_date=timezone.now())\n return JsonResponse(electricity_serializer.data, status=status.HTTP_201_CREATED) \n return JsonResponse(electricity_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n \n elif request.method == 'DELETE':\n # Delete every stored reading via the model manager.\n count = electricity.objects.all().delete()\n return JsonResponse({'message': '{} records were deleted successfully!'.format(count[0])}, status=status.HTTP_204_NO_CONTENT)\n\n@login_required\ndef electroshow(request):\n return render_to_response('indicator_electro.html')\n \n \n \nclass ElectroListjson(generics.ListAPIView):\n queryset=electricity.objects.all()\n serializer_class = ElectricitySerializer\n\n \n@login_required \ndef electroshow_all(request):\n return render_to_response('indicator_electro_all.html')\n \n \nclass electricityListView(LoginRequiredMixin,generic.ListView):\n model = electricity\n end_date = datetime.now()\n start_date = end_date - timedelta(days=1)\n queryset = electricity.objects.all().order_by('-id')[:500]#filter(param = \"100\", created_date__range=(start_date, end_date)) \n fields = '__all__'\n\n@login_required \ndef electricity_template_day(request):\n query=electricity.objects.all()\n end_date = datetime.now().date()\n start_date = end_date - timedelta(days=30)\n qsstats = QuerySetStats(query, date_field='created_date', aggregate=Max('power'))\n values = qsstats.time_series(start_date, end_date, interval='months')\n query_1 = query.filter(created_date__year=start_date.year)\n return render_to_response( 'template_electricity_google.html', {'query':query_1,'values':values})\n\n@login_required\ndef export_electro_all_csv(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"export_electro_all.csv\"'\n\n writer = csv.writer(response)\n writer.writerow(['param', 'created_date', 'current', 'energy', 'voltage', 'frequency', 'pf', 'power'])\n\n electro = electricity.objects.all().values_list('param', 'created_date', 'current', 'energy', 'voltage', 'frequency', 'pf', 'power')\n for e in electro:\n writer.writerow(e)\n return response\n \n@login_required \ndef electricity_all_graph(request):\n\n end_date = datetime.now()+ timedelta(days=0.2)\n #end_date = end_date.replace(tzinfo=pytz.timezone(\"Europe/Kiev\")) + timedelta(days=0.1)\n start_date = end_date - timedelta(days=1.2)\n queryset_power = electricity.objects.all().filter(param = \"100\", created_date__range=(start_date, end_date))\n #queryset_hum = mqtt.objects.all().filter(topic = \"home/poliv/water\").order_by('-created_date')\n \n return render(request, \"line_chart_electro.html\", {\"my_data_power\": queryset_power})\n\n\n\nclass electrometrListView(generic.ListView):\n model = electrometr\n end_date = timezone.now()\n #end_date = end_date.replace(tzinfo=pytz.timezone(\"Europe/Kiev\"))\n start_date = end_date - timedelta(days=1)\n queryset = electrometr.objects.all()#.order_by('-id')[:100]#filter(param = \"100\", created_date__range=(start_date, end_date))\n \n fields = '__all__'\n \n\n\ndef close_to_time(year, month, day, hr, min, sec):\n\n K = datetime(year, month, day, hr, min, sec)\n K = K.replace(tzinfo=pytz.timezone(\"UTC\"))\n end_date = K+ timedelta(minutes=10)\n start_date = K - 
timedelta(minutes=10)\n queryset_power = electricity.objects.all().filter(param = \"100\", created_date__range=(start_date, end_date))\n lst = np.asarray(queryset_power.values_list('created_date'))\n idx = (np.abs(lst - K)).argmin()\n close_to = lst[idx]\n return close_to\n\n@login_required \ndef electro_zones_old(request):\n month = datetime.today().month\n year = datetime.today().year\n queryset = electrozones.objects.all().filter(created_date__month = month,created_date__year = year).order_by('created_date')\n query_Sum_night = queryset.aggregate(Sum('energy_night'))[\"energy_night__sum\"]\n query_Sum_day = queryset.aggregate(Sum('energy_day'))[\"energy_day__sum\"]\n query_Sum = query_Sum_night+query_Sum_day\n\n price_Sum_night = float(query_Sum_night) * NIGHT_ELECTRO_PRICE\n price_Sum_day = float(query_Sum_day) * DAY_ELECTRO_PRICE\n price_Sum = price_Sum_night + price_Sum_day\n\n queryset_meter = electrozones.objects.all()\n meter_Sum_night = float(queryset_meter.aggregate(Sum('energy_night'))[\"energy_night__sum\"]) + NIGHT_ELECTRO_METER_0\n meter_Sum_day = float(queryset_meter.aggregate(Sum('energy_day'))[\"energy_day__sum\"]) + DAY_ELECTRO_METER_0\n meter_Sum = meter_Sum_day + meter_Sum_night\n\n return render_to_response('electro_zones.html', { 'queryset':queryset, 'energy_sum_night': query_Sum_night, \n 'energy_sum_day': query_Sum_day, 'energy_sum': query_Sum ,\n 'price_sum_day': price_Sum_day, 'price_sum': price_Sum ,\n 'price_sum_night': price_Sum_night, 'day_electro_meter': meter_Sum_day, 'night_electro_meter': meter_Sum_night,\n 'electro_meter': meter_Sum\n })\n \n \ndef electro_zones(request):\n\n if 'e-month' in request.GET:\n month = int(request.GET['e-month'][5:7]) \n year = int(request.GET['e-month'][:4])\n \n else:\n month = datetime.today().month\n year = datetime.today().year\n try:\n queryset = electrozones.objects.all().filter(created_date__month = month,created_date__year = year).order_by('created_date')\n query_Sum_night = queryset.aggregate(Sum('energy_night'))[\"energy_night__sum\"]\n query_Sum_day = queryset.aggregate(Sum('energy_day'))[\"energy_day__sum\"]\n query_Sum = query_Sum_night+query_Sum_day\n\n price_Sum_night = float(query_Sum_night) * NIGHT_ELECTRO_PRICE\n price_Sum_day = float(query_Sum_day) * DAY_ELECTRO_PRICE\n price_Sum = price_Sum_night + price_Sum_day\n\n queryset_meter = electrozones.objects.all().filter(created_date__month__lte = month,created_date__year__lte = year)\n meter_Sum_night = float(queryset_meter.aggregate(Sum('energy_night'))[\"energy_night__sum\"]) + NIGHT_ELECTRO_METER_0\n meter_Sum_day = float(queryset_meter.aggregate(Sum('energy_day'))[\"energy_day__sum\"]) + DAY_ELECTRO_METER_0\n meter_Sum = meter_Sum_day + meter_Sum_night\n\n return render_to_response('electro_zones.html', { 'queryset':queryset, 'energy_sum_night': query_Sum_night, \n 'energy_sum_day': query_Sum_day, 'energy_sum': query_Sum ,\n 'price_sum_day': price_Sum_day, 'price_sum': price_Sum ,\n 'price_sum_night': price_Sum_night, 'day_electro_meter': meter_Sum_day, 'night_electro_meter': meter_Sum_night,\n 'electro_meter': meter_Sum\n })\n except:\n return render_to_response('electro_zones.html')\n\n\n\nfrom django.http import HttpResponse\nlogger = logging.getLogger(__name__)\ndef index(request):\n logger.error(\"Test!!\")\n return HttpResponse(\"Hello logging world.\")\n\n\n\n\n@login_required\ndef temperature(request):\n \n with open('/home/pi/iot/myhome/myhome/humidity.csv', 'r') as f:\n opened_file = f.readlines()\n var = opened_file[-1].split(',')\n 
print(var)\n \n return render(request, \"temperature.html\", {'temperature':var[2],'humidity':var[3] , 'time':var[1]} )","sub_path":"myhome_1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"338291960","text":"# -*- coding: utf-8 -*-\n\"\"\"\n✏️safer: a safer file writer ✏️\n-------------------------------\n\nNo more partial writes or corruption! ``safer`` writes a whole file or\nnothing.\n\n``safer.writer()`` and ``safer.printer()`` are context managers that open a\nfile for writing or printing: if an Exception is raised, then the original file\nis left unaltered.\n\nInstall ``safer`` from the command line using\n`pip `_:\n\n.. code-block:: bash\n\n pip install safer\n\nTested on Python 2.7, and 3.4 through 3.8.\n\"\"\"\n\nfrom __future__ import print_function\nimport contextlib\nimport functools\nimport os\nimport shutil\nimport tempfile\ntry:\n from pathlib import Path\nexcept ImportError:\n Path = None\n\n__version__ = '1.0.1'\n__all__ = 'writer', 'printer'\n\n\n@contextlib.contextmanager\ndef writer(\n file,\n mode='w',\n create_parent=False,\n delete_failures=True,\n **kwargs\n):\n \"\"\"\n A context manager that yields {result}, but leaves the file unchanged\n if an exception is raised.\n\n It uses an extra temporary file which is renamed over the file only after\n the context manager exits successfully: this requires as much disk space\n as the old and new files put together.\n\n If ``mode`` contains either ``'a'`` (append), or ``'+'`` (update), then\n the original file will be copied to the temporary file before writing\n starts.\n\n Arguments:\n file:\n Path to the file to be opened\n\n mode:\n Mode string passed to ``open()``\n\n create_parent:\n If true, create the parent directory of the file if it doesn't exist\n\n delete_failures:\n If true, the temporary file is deleted if there is an exception\n\n kwargs:\n Keywords passed to ``open()``\n \"\"\"\n copy = '+' in mode or 'a' in mode\n if not copy and 'r' in mode:\n raise IOError('File not open for writing')\n\n if Path and isinstance(file, Path):\n file = str(file)\n elif not isinstance(file, str):\n raise IOError('`file` argument must be a string')\n\n parent = os.path.dirname(os.path.abspath(file))\n if not os.path.exists(parent) and create_parent:\n os.makedirs(parent)\n\n fd, out = tempfile.mkstemp(dir=parent)\n os.close(fd)\n\n if copy and os.path.exists(file):\n shutil.copy2(file, out)\n\n try:\n with open(out, mode, **kwargs) as fp:\n yield fp\n\n except Exception:\n if delete_failures and os.path.exists(out):\n try:\n os.remove(out)\n except Exception:\n pass\n raise\n\n if not copy:\n if os.path.exists(file):\n shutil.copymode(file, out)\n else:\n os.chmod(out, 0o100644)\n\n os.rename(out, file)\n\n\n@functools.wraps(writer)\n@contextlib.contextmanager\ndef printer(*args, **kwargs):\n with writer(*args, **kwargs) as fp:\n yield functools.partial(print, file=fp)\n\n\nprinter.__doc__ = printer.__doc__.format(\n result='a function that prints to the opened file'\n)\nwriter.__doc__ = writer.__doc__.format(\n result='a writable stream returned from open()'\n)\n\nwriter._examples = \"\"\"\\\n# dangerous\nwith open(file, 'w') as fp:\n json.dump(data, fp) # If this fails, the file is corrupted\n\n# safer\nwith safer.writer(file) as fp:\n json.dump(data, fp) # If this fails, the file is unaltered\n\"\"\"\n\nprinter._examples = \"\"\"\\\n# dangerous\nwith open(file, 'w') as fp:\n for item in 
items:\n print(item, file=fp)\n # Prints a partial file if ``items`` raises an exception while iterating\n # or any ``item.__str__()`` raises an exception\n\n# safer\nwith safer.printer(file) as print:\n for item in items:\n print(item)\n # Either the whole file is written, or nothing\n\"\"\"\n","sub_path":"safer.py","file_name":"safer.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"637541799","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport re\nfrom capstone import Cs, CS_ARCH_X86, CS_ARCH_ARM, CS_MODE_16, CS_MODE_32, CS_MODE_64, CS_MODE_ARM\nfrom string import ascii_letters, digits\nfrom enum import Enum\n\nfrom .. import arg, Unit\n\n__all__ = ['asm']\n\n\nclass box:\n def __init__(self, **kw):\n for k, v in kw.items():\n setattr(self, k, v)\n\n\nclass ARCH(Enum):\n x16 = CS_ARCH_X86, CS_MODE_16\n x32 = CS_ARCH_X86, CS_MODE_32\n x64 = CS_ARCH_X86, CS_MODE_64\n arm = CS_ARCH_ARM, CS_MODE_ARM\n\n def __str__(self):\n return self.name\n\n\nclass asm(Unit):\n \"\"\"\n Disassembles the input data using the capstone disassembly library.\n \"\"\"\n\n def __init__(\n self,\n mode : arg.option(choices=ARCH, help='select one of the following architectures: {choices}; the default is {default}.') = ARCH.x32,\n addr : arg.switch('-a', help='hide addresses of instruction') = True,\n bytes : arg.switch('-b', help='hide instruction bytes next to disassembly') = True,\n str : arg.switch('-s', help='disassemble over detected strings') = True,\n zeros : arg.switch('-z', help='disassemble zero byte patches') = True,\n width : arg.number('-w', bound=(3, None), help='number of data bytes to put in one row, {default} by default') = 15\n ):\n mode = arg.as_option(mode, ARCH)\n self.superinit(super(), **vars())\n\n def _printable(self, b):\n return 0x20 <= b <= 0x7E and b not in B' \\t\\v\\r\\n'\n\n def _strings(self, data):\n if not self.args.str:\n return\n for match in re.finditer(BR'([ -~]{5,})\\x00?|((?:[ -~]\\x00){5,})(?:\\x00\\x00)?', data):\n string = match[0]\n try:\n # Group 1 matches plain ASCII runs; when it is empty, group 2 matched a UTF-16LE run.\n string = string.decode('utf-8') if match[1] \\\n else string.decode('utf-16-le')\n except Exception:\n continue\n alpha = len([x for x in string if x in digits or x in ascii_letters])\n if 2 * alpha > len(string):\n yield box(start=match.start(), end=match.end(), data=string)\n self.log_info(F'detected string at {match.start():08X}:', string)\n\n def _format(self, addr=0, data=B'', code='', arg='', comment=''):\n data_str = ''.join('%c' % X if self._printable(X) else '.' 
for X in data)\n data_hex = ' '.join('%02X' % X for X in data)\n if comment: comment = ' ; ' + comment\n return {\n 'addr': addr,\n 'str': data_str,\n 'hex': data_hex,\n 'code': code,\n 'arg': arg,\n 'comment': comment\n }\n\n def _bytepatch(self, data, addr, end):\n return self._format(addr, data[addr:end], 'db', ','.join('%02X' % b for b in data[addr:end]))\n\n def _nullsize(self, data, offset, max):\n length = 0\n try:\n while not data[offset + length] and length < max:\n length += 1\n except IndexError:\n pass\n return length\n\n def _disassemble(self, data):\n capstone = Cs(*self.args.mode.value)\n strz = self._strings(data)\n string = next(strz, None)\n cursor, done = 0, 0\n while done < len(data):\n cursor = max(cursor, done)\n patchsize = self._nullsize(data, cursor, self.args.width)\n if patchsize > 2:\n yield self._format(done, data[done:done + patchsize], 'db', ','.join('0' * patchsize))\n done += patchsize\n continue\n if cursor >= len(data):\n yield self._bytepatch(data, done, len(data))\n done = cursor\n if string and cursor >= string.end:\n yield self._bytepatch(data, done, string.start)\n yield self._format(string.start, data[string.start:string.end], 'db', string.data)\n done = string.end\n continue\n try:\n ins = next(capstone.disasm(\n data[cursor:cursor + 15], cursor, count=1))\n end = ins.address + ins.size\n if self.args.str and string:\n if end > string.start and string.end > cursor:\n cursor = string.end\n continue\n except StopIteration:\n cursor += 1\n continue\n else:\n yield self._format(ins.address, ins.bytes, ins.mnemonic, ins.op_str)\n done = end\n\n def process(self, data):\n disassembly = list(self._disassemble(data))\n for key in ['hex', 'str', 'code', 'arg']:\n m = max(len(r[key]) for r in disassembly)\n for r in disassembly:\n r[key] = r[key].ljust(m)\n line_format = '{code} {arg}{comment}'\n if self.args.bytes:\n line_format = '{hex} {str} ' + line_format\n if self.args.addr:\n line_format = '0x{addr:08X}: ' + line_format\n return '\\n'.join(line_format.format(**r) for r in disassembly).encode(self.codec)\n","sub_path":"refinery/units/sinks/asm.py","file_name":"asm.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"113308298","text":"# -*- coding: utf-8 -*-\n# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0\n# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt\n\n\"\"\"Tests that our test infrastructure is really working!\"\"\"\n\nimport datetime\nimport os\nimport re\nimport sys\n\nimport pytest\n\nimport coverage\nfrom coverage.backunittest import TestCase, unittest\nfrom coverage.files import actual_path\nfrom coverage.misc import StopEverything\nimport coverage.optional\n\nfrom tests.coveragetest import CoverageTest, convert_skip_exceptions\nfrom tests.helpers import arcs_to_arcz_repr, arcz_to_arcs\nfrom tests.helpers import CheckUniqueFilenames, re_lines, re_line\n\n\ndef test_xdist_sys_path_nuttiness_is_fixed():\n # See conftest.py:fix_xdist_sys_path\n assert sys.path[1] != ''\n assert os.environ.get('PYTHONPATH') is None\n\n\nclass TestingTest(TestCase):\n \"\"\"Tests of helper methods on `backunittest.TestCase`.\"\"\"\n\n def test_assert_count_equal(self):\n self.assertCountEqual(set(), set())\n self.assertCountEqual(set([1,2,3]), set([3,1,2]))\n with self.assertRaises(AssertionError):\n self.assertCountEqual(set([1,2,3]), set())\n with self.assertRaises(AssertionError):\n 
self.assertCountEqual(set([1,2,3]), set([4,5,6]))\n\n\nclass CoverageTestTest(CoverageTest):\n \"\"\"Test the methods in `CoverageTest`.\"\"\"\n\n def test_file_exists(self):\n self.make_file(\"whoville.txt\", \"We are here!\")\n self.assert_exists(\"whoville.txt\")\n self.assert_doesnt_exist(\"shadow.txt\")\n msg = \"False is not true : File 'whoville.txt' shouldn't exist\"\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_doesnt_exist(\"whoville.txt\")\n msg = \"False is not true : File 'shadow.txt' should exist\"\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_exists(\"shadow.txt\")\n\n def test_file_count(self):\n self.make_file(\"abcde.txt\", \"abcde\")\n self.make_file(\"axczz.txt\", \"axczz\")\n self.make_file(\"afile.txt\", \"afile\")\n self.assert_file_count(\"a*.txt\", 3)\n self.assert_file_count(\"*c*.txt\", 2)\n self.assert_file_count(\"afile.*\", 1)\n self.assert_file_count(\"*.q\", 0)\n msg = re.escape(\n \"3 != 13 : There should be 13 files matching 'a*.txt', but there are these: \"\n \"['abcde.txt', 'afile.txt', 'axczz.txt']\"\n )\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_file_count(\"a*.txt\", 13)\n msg = re.escape(\n \"2 != 12 : There should be 12 files matching '*c*.txt', but there are these: \"\n \"['abcde.txt', 'axczz.txt']\"\n )\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_file_count(\"*c*.txt\", 12)\n msg = re.escape(\n \"1 != 11 : There should be 11 files matching 'afile.*', but there are these: \"\n \"['afile.txt']\"\n )\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_file_count(\"afile.*\", 11)\n msg = re.escape(\n \"0 != 10 : There should be 10 files matching '*.q', but there are these: []\"\n )\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_file_count(\"*.q\", 10)\n\n def test_assert_startwith(self):\n self.assert_starts_with(\"xyzzy\", \"xy\")\n self.assert_starts_with(\"xyz\\nabc\", \"xy\")\n self.assert_starts_with(\"xyzzy\", (\"x\", \"z\"))\n msg = re.escape(\"'xyz' doesn't start with 'a'\")\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_starts_with(\"xyz\", \"a\")\n msg = re.escape(\"'xyz\\\\nabc' doesn't start with 'a'\")\n with self.assertRaisesRegex(AssertionError, msg):\n self.assert_starts_with(\"xyz\\nabc\", \"a\")\n\n def test_assert_recent_datetime(self):\n def now_delta(seconds):\n \"\"\"Make a datetime `seconds` seconds from now.\"\"\"\n return datetime.datetime.now() + datetime.timedelta(seconds=seconds)\n\n # Default delta is 10 seconds.\n self.assert_recent_datetime(now_delta(0))\n self.assert_recent_datetime(now_delta(-9))\n with self.assertRaises(AssertionError):\n self.assert_recent_datetime(now_delta(-11))\n with self.assertRaises(AssertionError):\n self.assert_recent_datetime(now_delta(1))\n\n # Delta is settable.\n self.assert_recent_datetime(now_delta(0), seconds=120)\n self.assert_recent_datetime(now_delta(-100), seconds=120)\n with self.assertRaises(AssertionError):\n self.assert_recent_datetime(now_delta(-1000), seconds=120)\n with self.assertRaises(AssertionError):\n self.assert_recent_datetime(now_delta(1), seconds=120)\n\n def test_assert_warnings(self):\n cov = coverage.Coverage()\n\n # Make a warning, it should catch it properly.\n with self.assert_warnings(cov, [\"Hello there!\"]):\n cov._warn(\"Hello there!\")\n\n # The expected warnings are regexes.\n with self.assert_warnings(cov, [\"Hello.*!\"]):\n cov._warn(\"Hello there!\")\n\n # There can be a bunch of actual warnings.\n with 
self.assert_warnings(cov, [\"Hello.*!\"]):\n cov._warn(\"You there?\")\n cov._warn(\"Hello there!\")\n\n # There can be a bunch of expected warnings.\n with self.assert_warnings(cov, [\"Hello.*!\", \"You\"]):\n cov._warn(\"You there?\")\n cov._warn(\"Hello there!\")\n\n # But if there are a bunch of expected warnings, they have to all happen.\n warn_regex = r\"Didn't find warning 'You' in \\['Hello there!'\\]\"\n with self.assertRaisesRegex(AssertionError, warn_regex):\n with self.assert_warnings(cov, [\"Hello.*!\", \"You\"]):\n cov._warn(\"Hello there!\")\n\n # Make a different warning than expected, it should raise an assertion.\n warn_regex = r\"Didn't find warning 'Not me' in \\['Hello there!'\\]\"\n with self.assertRaisesRegex(AssertionError, warn_regex):\n with self.assert_warnings(cov, [\"Not me\"]):\n cov._warn(\"Hello there!\")\n\n # Try checking a warning that shouldn't appear: happy case.\n with self.assert_warnings(cov, [\"Hi\"], not_warnings=[\"Bye\"]):\n cov._warn(\"Hi\")\n\n # But it should fail if the unexpected warning does appear.\n warn_regex = r\"Found warning 'Bye' in \\['Hi', 'Bye'\\]\"\n with self.assertRaisesRegex(AssertionError, warn_regex):\n with self.assert_warnings(cov, [\"Hi\"], not_warnings=[\"Bye\"]):\n cov._warn(\"Hi\")\n cov._warn(\"Bye\")\n\n # assert_warnings shouldn't hide a real exception.\n with self.assertRaisesRegex(ZeroDivisionError, \"oops\"):\n with self.assert_warnings(cov, [\"Hello there!\"]):\n raise ZeroDivisionError(\"oops\")\n\n def test_assert_no_warnings(self):\n cov = coverage.Coverage()\n\n # Happy path: no warnings.\n with self.assert_warnings(cov, []):\n pass\n\n # If you said there would be no warnings, and there were, fail!\n warn_regex = r\"Unexpected warnings: \\['Watch out!'\\]\"\n with self.assertRaisesRegex(AssertionError, warn_regex):\n with self.assert_warnings(cov, []):\n cov._warn(\"Watch out!\")\n\n def test_sub_python_is_this_python(self):\n # Try it with a Python command.\n self.set_environ('COV_FOOBAR', 'XYZZY')\n self.make_file(\"showme.py\", \"\"\"\\\n import os, sys\n print(sys.executable)\n print(os.__file__)\n print(os.environ['COV_FOOBAR'])\n \"\"\")\n out = self.run_command(\"python showme.py\").splitlines()\n self.assertEqual(actual_path(out[0]), actual_path(sys.executable))\n self.assertEqual(out[1], os.__file__)\n self.assertEqual(out[2], 'XYZZY')\n\n # Try it with a \"coverage debug sys\" command.\n out = self.run_command(\"coverage debug sys\")\n\n executable = re_line(out, \"executable:\")\n executable = executable.split(\":\", 1)[1].strip()\n self.assertTrue(_same_python_executable(executable, sys.executable))\n\n # \"environment: COV_FOOBAR = XYZZY\" or \"COV_FOOBAR = XYZZY\"\n environ = re_line(out, \"COV_FOOBAR\")\n _, _, environ = environ.rpartition(\":\")\n self.assertEqual(environ.strip(), \"COV_FOOBAR = XYZZY\")\n\n def test_run_command_stdout_stderr(self):\n # run_command should give us both stdout and stderr.\n self.make_file(\"outputs.py\", \"\"\"\\\n import sys\n sys.stderr.write(\"StdErr\\\\n\")\n print(\"StdOut\")\n \"\"\")\n out = self.run_command(\"python outputs.py\")\n self.assertIn(\"StdOut\\n\", out)\n self.assertIn(\"StdErr\\n\", out)\n\n\nclass CheckUniqueFilenamesTest(CoverageTest):\n \"\"\"Tests of CheckUniqueFilenames.\"\"\"\n\n run_in_temp_dir = False\n\n class Stub(object):\n \"\"\"A stand-in for the class we're checking.\"\"\"\n def __init__(self, x):\n self.x = x\n\n def method(self, filename, a=17, b=\"hello\"):\n \"\"\"The method we'll wrap, with args to be sure args 
work.\"\"\"\n return (self.x, filename, a, b)\n\n def test_detect_duplicate(self):\n stub = self.Stub(23)\n CheckUniqueFilenames.hook(stub, \"method\")\n\n # Two method calls with different names are fine.\n assert stub.method(\"file1\") == (23, \"file1\", 17, \"hello\")\n assert stub.method(\"file2\", 1723, b=\"what\") == (23, \"file2\", 1723, \"what\")\n\n # A duplicate file name trips an assertion.\n with self.assertRaises(AssertionError):\n stub.method(\"file1\")\n\n\n@pytest.mark.parametrize(\"text, pat, result\", [\n (\"line1\\nline2\\nline3\\n\", \"line\", \"line1\\nline2\\nline3\\n\"),\n (\"line1\\nline2\\nline3\\n\", \"[13]\", \"line1\\nline3\\n\"),\n (\"line1\\nline2\\nline3\\n\", \"X\", \"\"),\n])\ndef test_re_lines(text, pat, result):\n assert re_lines(text, pat) == result\n\n@pytest.mark.parametrize(\"text, pat, result\", [\n (\"line1\\nline2\\nline3\\n\", \"line\", \"\"),\n (\"line1\\nline2\\nline3\\n\", \"[13]\", \"line2\\n\"),\n (\"line1\\nline2\\nline3\\n\", \"X\", \"line1\\nline2\\nline3\\n\"),\n])\ndef test_re_lines_inverted(text, pat, result):\n assert re_lines(text, pat, match=False) == result\n\n@pytest.mark.parametrize(\"text, pat, result\", [\n (\"line1\\nline2\\nline3\\n\", \"2\", \"line2\"),\n])\ndef test_re_line(text, pat, result):\n assert re_line(text, pat) == result\n\n@pytest.mark.parametrize(\"text, pat\", [\n (\"line1\\nline2\\nline3\\n\", \"line\"), # too many matches\n (\"line1\\nline2\\nline3\\n\", \"X\"), # no matches\n])\ndef test_re_line_bad(text, pat):\n with pytest.raises(AssertionError):\n re_line(text, pat)\n\n\ndef test_convert_skip_exceptions():\n @convert_skip_exceptions\n def some_method(ret=None, exc=None):\n \"\"\"Be like a test case.\"\"\"\n if exc:\n raise exc(\"yikes!\")\n return ret\n\n # Normal flow is normal.\n assert some_method(ret=[17, 23]) == [17, 23]\n\n # Exceptions are raised normally.\n with pytest.raises(ValueError):\n some_method(exc=ValueError)\n\n # But a StopEverything becomes a SkipTest.\n with pytest.raises(unittest.SkipTest):\n some_method(exc=StopEverything)\n\n\ndef _same_python_executable(e1, e2):\n \"\"\"Determine if `e1` and `e2` refer to the same Python executable.\n\n Either path could include symbolic links. 
The two paths might not refer\n to the exact same file, but if they are in the same directory and their\n numeric suffixes aren't different, they are the same executable.\n\n \"\"\"\n e1 = os.path.abspath(os.path.realpath(e1))\n e2 = os.path.abspath(os.path.realpath(e2))\n\n if os.path.dirname(e1) != os.path.dirname(e2):\n return False # pragma: only failure\n\n e1 = os.path.basename(e1)\n e2 = os.path.basename(e2)\n\n if e1 == \"python\" or e2 == \"python\" or e1 == e2:\n # Python and Python2.3: OK\n # Python2.3 and Python: OK\n # Python and Python: OK\n # Python2.3 and Python2.3: OK\n return True\n\n return False # pragma: only failure\n\n\ndef test_optional_without():\n # pylint: disable=reimported\n from coverage.optional import toml as toml1\n with coverage.optional.without('toml'):\n from coverage.optional import toml as toml2\n from coverage.optional import toml as toml3\n\n assert toml1 is toml3 is not None\n assert toml2 is None\n\n\n@pytest.mark.parametrize(\"arcz, arcs\", [\n (\".1 12 2.\", [(-1, 1), (1, 2), (2, -1)]),\n (\"-11 12 2-5\", [(-1, 1), (1, 2), (2, -5)]),\n (\"-QA CB IT Z-A\", [(-26, 10), (12, 11), (18, 29), (35, -10)]),\n])\ndef test_arcz_to_arcs(arcz, arcs):\n assert arcz_to_arcs(arcz) == arcs\n\n\n@pytest.mark.parametrize(\"arcs, arcz_repr\", [\n ([(-1, 1), (1, 2), (2, -1)], \"(-1, 1) # .1\\n(1, 2) # 12\\n(2, -1) # 2.\\n\"),\n ([(-1, 1), (1, 2), (2, -5)], \"(-1, 1) # .1\\n(1, 2) # 12\\n(2, -5) # 2-5\\n\"),\n ([(-26, 10), (12, 11), (18, 29), (35, -10), (1, 33), (100, 7)],\n (\n \"(-26, 10) # -QA\\n\"\n \"(12, 11) # CB\\n\"\n \"(18, 29) # IT\\n\"\n \"(35, -10) # Z-A\\n\"\n \"(1, 33) # 1X\\n\"\n \"(100, 7) # ?7\\n\"\n )\n ),\n])\ndef test_arcs_to_arcz_repr(arcs, arcz_repr):\n assert arcs_to_arcz_repr(arcs) == arcz_repr\n","sub_path":"tests/test_testing.py","file_name":"test_testing.py","file_ext":"py","file_size_in_byte":13344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"527032738","text":"import logging\nimport os\n\nimport hydra\nimport nltk\nimport pytorch_lightning as pl\nfrom omegaconf import OmegaConf\nfrom pytorch_lightning.callbacks import (\n EarlyStopping,\n LearningRateMonitor,\n ModelCheckpoint,\n)\nfrom pytorch_lightning.utilities.device_parser import num_cuda_devices\nfrom wandb import Artifact\n\nfrom conf import TrainConfig\nfrom src.data_utils import CMCDataModule\nfrom src.model import CMCModule\nfrom src.utils import WandbOrganizer\n\nnltk.download(\"wordnet\")\n\n\n@hydra.main(version_base=\"1.1\", config_path=\"conf\", config_name=\"train_config\")\ndef main(cfg: TrainConfig) -> None:\n # -----------------------\n # - init -\n # -----------------------\n pl.seed_everything(42)\n\n world_size = None\n if cfg.trainer.accelerator == \"cpu\":\n world_size = 1\n elif cfg.trainer.accelerator == \"gpu\":\n if cfg.trainer.devices == \"auto\":\n world_size = num_cuda_devices() # all available gpus\n elif isinstance(cfg.trainer.devices, int):\n if cfg.trainer.devices == -1:\n world_size = num_cuda_devices() # all available gpus\n else:\n world_size = cfg.trainer.devices # n first gpus\n elif isinstance(cfg.trainer.devices, str):\n if cfg.trainer.devices == \"-1\":\n world_size = num_cuda_devices() # all available gpus\n else:\n world_size = len(cfg.trainer.devices.split(\",\")) # a list of specific gpus separated by ','\n elif isinstance(cfg.trainer.devices, list):\n world_size = len(cfg.trainer.devices) # a list of specific gpus\n\n if world_size is None:\n raise ValueError(\"Unknown format for 
number of gpus\")\n local_rank = int(os.environ.get(\"LOCAL_RANK\", 0))\n logging.info(f\"Local rank: {local_rank}\")\n logging.info(f\"World size: {world_size}\")\n\n dm = CMCDataModule(\n dataset_cfg=cfg.dataset,\n model_cfg=cfg.model,\n input_cfg=cfg.input,\n local_rank=local_rank,\n world_size=world_size,\n shift_labels=cfg.model.configuration != \"decoder\",\n process_retrieved=cfg.model.configuration == \"race\",\n )\n\n if local_rank == 0:\n dm.prepare_data()\n dm.setup(stage=\"fit\")\n\n batch_size = cfg.dataset.train_dataloader_conf.batch_size * cfg.trainer.accumulate_grad_batches * world_size\n\n # main module with model logic\n model = CMCModule(\n model_cfg=cfg.model,\n diff_tokenizer=dm.diff_tokenizer,\n msg_tokenizer=dm.msg_tokenizer,\n learning_rate=cfg.optimizer.learning_rate,\n initial_batch_size=cfg.optimizer.initial_batch_size,\n weight_decay=cfg.optimizer.weight_decay,\n num_warmup_steps=cfg.optimizer.num_warmup_steps,\n ratio_warmup_steps=cfg.optimizer.ratio_warmup_steps,\n batch_size=batch_size,\n )\n cfg.optimizer.learning_rate = model.learning_rate\n\n run_name = WandbOrganizer.get_run_name(\n cfg.model,\n encoder_input_type=cfg.input.encoder_input_type,\n train_with_history=cfg.input.train_with_history,\n )\n run_tags = WandbOrganizer.get_tags_train(\n cfg.model,\n encoder_input_type=cfg.input.encoder_input_type,\n train_with_history=cfg.input.train_with_history,\n )\n\n # logger\n if cfg.logger.use_wandb:\n if cfg.logger.use_api_key:\n with open(hydra.utils.to_absolute_path(\"wandb_api_key.txt\"), \"r\") as f:\n os.environ[\"WANDB_API_KEY\"] = f.read().strip()\n trainer_logger = pl.loggers.WandbLogger(\n name=run_name,\n project=cfg.logger.project,\n config=OmegaConf.to_container(cfg, resolve=True),\n tags=run_tags,\n job_type=\"train\",\n )\n trainer_logger.watch(model, log=\"gradients\", log_freq=250)\n\n # callbacks\n lr_logger = LearningRateMonitor(logging_interval=\"step\")\n checkpoint_callback = ModelCheckpoint(\n dirpath=f\"{run_name}_checkpoint\",\n save_top_k=1,\n save_last=True,\n verbose=True,\n monitor=\"val_MRR_top5\",\n mode=\"max\",\n )\n early_stopping_callback = EarlyStopping(monitor=\"val_loss\", mode=\"min\", verbose=True)\n\n # trainer\n trainer = pl.Trainer(\n **cfg.trainer, # type: ignore[arg-type]\n logger=trainer_logger if cfg.logger.use_wandb else True,\n callbacks=[lr_logger, checkpoint_callback, early_stopping_callback],\n )\n\n # -----------------------\n # zero-shot validation -\n # -----------------------\n trainer.validate(model, dm)\n\n # -----------------------\n # train -\n # -----------------------\n trainer.fit(model, dm)\n\n # -----------------------\n # save ckpt to wandb -\n # -----------------------\n if cfg.logger.use_wandb and cfg.logger.save_artifact:\n artifact = Artifact(\n name=run_name,\n type=\"model\",\n metadata={\"tags\": run_tags},\n )\n artifact.add_dir(f\"{run_name}_checkpoint\")\n trainer_logger.experiment.log_artifact(artifact)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"63090787","text":"\"\"\"DynamoDB Client.\n\nProvides a client for DynamoDB.\n\"\"\"\n\nimport logging\n\nimport boto3\nfrom boto3.dynamodb.conditions import Attr\nfrom botocore.exceptions import ClientError\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\nclass DynamoDBClient:\n\n def __init__(self, table_name: str, hash_key: str = 
None):\n self.dynamodb = boto3.resource('dynamodb')\n\n try:\n self.hash_key = hash_key\n self.table = self.dynamodb.Table(table_name)\n except ClientError as err:\n log.error(err)\n raise\n\n def get_item(self, partition_key_value: str) -> dict:\n log.debug(\n f'Querying table \"{self.table.table_name}\" for item with '\n f'hash value {partition_key_value}'\n )\n try:\n return self.table.get_item(\n Key={self.hash_key: partition_key_value},\n ConsistentRead=True\n )\n except ClientError as err:\n log.error(err)\n raise\n","sub_path":"notifier/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"439485322","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n\nimport json\nimport time\nimport os\nimport base64\nimport urllib\nimport logging\nimport requests\n\nfrom concurrent.futures import as_completed\nfrom requests_futures.sessions import FuturesSession\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import memcache\n\ndef get_oauth_headers():\n oauth_token = memcache.get('oauth_bearer_token')\n if oauth_token is None:\n path = os.path.join(os.path.split(__file__)[0], 'api-auth.json')\n authdata = json.load(open(path))\n\n credentials = \"{}:{}\".format(authdata['blizzard_client_id'], authdata['blizzard_client_secret'])\n encoded_credentials = base64.b64encode(credentials.encode('ascii')).decode('ascii')\n headers = {'Authorization': f'Basic {encoded_credentials}'}\n\n r = requests.post('https://us.battle.net/oauth/token',\n data={'grant_type': 'client_credentials'},\n headers=headers)\n\n if r.status_code == 200:\n response_data = r.json()\n oauth_token = response_data['access_token']\n\n # Blizzard sends an expiration time for the token in the response,\n # but we want to make sure that our memcache expires before they\n # do. 
Subtract 60s off that so we make sure to re-request before\n # it expires.\n expiration = int(response_data['expires_in']) - 60\n memcache.set('oauth_bearer_token', oauth_token, time=expiration)\n\n if oauth_token is None:\n return {}\n\n return {'Authorization': 'Bearer ' + oauth_token}\n\nclass ClassEntry(ndb.Model):\n classId = ndb.IntegerProperty()\n name = ndb.StringProperty()\n\n @classmethod\n def get_mapping(cls):\n results = cls.query().fetch()\n if results:\n return dict((x.classId, x.name) for x in results)\n return {}\n\nclass Realm(ndb.Model):\n realm = ndb.StringProperty(indexed=True, required=True)\n slug = ndb.StringProperty(indexed=True, required=True)\n\n @classmethod\n def query_realm(cls, toon_realm):\n result = cls.query(cls.slug == toon_realm, namespace='Realms').fetch(1)[0]\n if result:\n return result.realm\n return ''\n\nclass Importer(object):\n\n # Each quality rank has its own list to allow for adjusting whether\n # low-rank enchants count as lesser enchants or not.\n\n WEAPON_ENCHANTS_Q1 = [\n 6629, # Burning Devotion (Quality: 1)\n 6635, # Earthen Devotion (Quality: 1)\n 6647, # Frozen Devotion (Quality: 1)\n 6641, # Sophic Devotion (Quality: 1)\n 6653, # Wafting Devotion (Quality: 1)\n 6526, # High Intensity Thermal Scanner (Quality: 1)\n ]\n\n WEAPON_ENCHANTS_Q2 = [\n 6630, # Burning Devotion (Quality: 2)\n 6636, # Earthen Devotion (Quality: 2)\n 6648, # Frozen Devotion (Quality: 2)\n 6642, # Sophic Devotion (Quality: 2)\n 6654, # Wafting Devotion (Quality: 2)\n 6527, # High Intensity Thermal Scanner (Quality: 2)\n ]\n\n WEAPON_ENCHANTS_Q3 = [\n 6631, # Burning Devotion (Quality: 3)\n 6637, # Earthen Devotion (Quality: 3)\n 6649, # Frozen Devotion (Quality: 3)\n 6643, # Sophic Devotion (Quality: 3)\n 6655, # Wafting Devotion (Quality: 3)\n 6528, # High Intensity Thermal Scanner (Quality: 3)\n ]\n \n DEATH_KNIGHT_RUNEFORGES = [\n 3368, # Fallen Crusader\n 3380, # Razorice\n 6241, # Sanguination\n 6243, # Hysteria\n ]\n\n BRACER_ENCHANTS_Q1 = [\n 6572, # Devotion Of Avoidance (Quality: 1)\n 6578, # Devotion Of Leech (Quality: 1)\n 6584, # Devotion Of Speed (Quality: 1)\n ]\n\n BRACER_ENCHANTS_Q2 = [\n 6573, # Devotion Of Avoidance (Quality: 2)\n 6579, # Devotion Of Leech (Quality: 2)\n 6585, # Devotion Of Speed (Quality: 2)\n ]\n\n BRACER_ENCHANTS_Q3 = [\n 6574, # Devotion Of Avoidance (Quality: 3)\n 6580, # Devotion Of Leech (Quality: 3)\n 6586, # Devotion Of Speed (Quality: 3)\n ]\n\n RING_ENCHANTS_Q1 = [\n 6548, # Devotion Of Critical Strike (Quality: 1)\n 6554, # Devotion Of Haste (Quality: 1)\n 6560, # Devotion Of Mastery (Quality: 1)\n 6566, # Devotion Of Versatility (Quality: 1)\n ]\n\n RING_ENCHANTS_Q2 = [\n 6549, # Devotion Of Critical Strike (Quality: 2)\n 6555, # Devotion Of Haste (Quality: 2)\n 6561, # Devotion Of Mastery (Quality: 2)\n 6567, # Devotion Of Versatility (Quality: 2)\n ]\n\n RING_ENCHANTS_Q3 = [\n 6550, # Devotion Of Critical Strike (Quality: 3)\n 6556, # Devotion Of Haste (Quality: 3)\n 6562, # Devotion Of Mastery (Quality: 3)\n 6568, # Devotion Of Versatility (Quality: 3)\n ]\n\n CLOAK_ENCHANTS_Q1 = [\n 6590, # Graceful Avoidance (Quality: 1)\n 6602, # Homebound Speed (Quality: 1)\n 6596, # Regenerative Leech (Quality: 1)\n ]\n\n CLOAK_ENCHANTS_Q2 = [\n 6591, # Graceful Avoidance (Quality: 2)\n 6603, # Homebound Speed (Quality: 2)\n 6597, # Regenerative Leech (Quality: 2)\n ]\n\n CLOAK_ENCHANTS_Q3 = [\n 6592, # Graceful Avoidance (Quality: 3)\n 6604, # Homebound Speed (Quality: 3)\n 6598, # Regenerative Leech (Quality: 3)\n ]\n\n 
LEG_ENCHANTS_Q1 = [\n 6494, # Frosted Armor Kit (Quality: 1)\n 6488, # Fierce Armor Kit (Quality: 1)\n 6542, # Temporal Spellthread (Quality: 1)\n 6539, # Frozen Spellthread (Quality: 1)\n ]\n\n LEG_ENCHANTS_Q2 = [\n 6495, # Frosted Armor Kit (Quality: 2)\n 6489, # Fierce Armor Kit (Quality: 2)\n 6543, # Temporal Spellthread (Quality: 2)\n 6540, # Frozen Spellthread (Quality: 2)\n ]\n\n LEG_ENCHANTS_Q3 = [\n 6496, # Frosted Armor Kit (Quality: 3)\n 6490, # Fierce Armor Kit (Quality: 3)\n 6544, # Temporal Spellthread (Quality: 3)\n 6541, # Frozen Spellthread (Quality: 3)\n ]\n\n CHEST_ENCHANTS_Q1 = [\n 6623, # Waking Stats (Quality: 1)\n ]\n\n CHEST_ENCHANTS_Q2 = [\n 6624, # Waking Stats (Quality: 2)\n ]\n\n CHEST_ENCHANTS_Q3 = [\n 6625, # Waking Stats (Quality: 3)\n ]\n\n FEET_ENCHANTS_Q1 = [\n 6611, # Watcher'S Loam (Quality: 1)\n 6605, # Plainsrunner'S Breeze (Quality: 1)\n ]\n\n FEET_ENCHANTS_Q2 = [\n 6612, # Watcher'S Loam (Quality: 2)\n 6606, # Plainsrunner'S Breeze (Quality: 2)\n ]\n\n FEET_ENCHANTS_Q3 = [\n 6613, # Watcher'S Loam (Quality: 3)\n 6607, # Plainsrunner'S Breeze (Quality: 3)\n ]\n\n # Join the lists that will count as \"high\" enchants.\n CHEST_ENCHANTS = CHEST_ENCHANTS_Q1 + CHEST_ENCHANTS_Q2 + CHEST_ENCHANTS_Q3\n CLOAK_ENCHANTS = CLOAK_ENCHANTS_Q1 + CLOAK_ENCHANTS_Q2 + CLOAK_ENCHANTS_Q3\n BRACER_ENCHANTS = BRACER_ENCHANTS_Q1 + BRACER_ENCHANTS_Q2 + BRACER_ENCHANTS_Q3\n LEG_ENCHANTS = LEG_ENCHANTS_Q1 + LEG_ENCHANTS_Q2 + LEG_ENCHANTS_Q3\n FEET_ENCHANTS = FEET_ENCHANTS_Q1 + FEET_ENCHANTS_Q2 + FEET_ENCHANTS_Q3\n RING_ENCHANTS = RING_ENCHANTS_Q1 + RING_ENCHANTS_Q2 + RING_ENCHANTS_Q3\n WEAPON_ENCHANTS = WEAPON_ENCHANTS_Q1 + WEAPON_ENCHANTS_Q2 + WEAPON_ENCHANTS_Q3 + DEATH_KNIGHT_RUNEFORGES\n\n ENCHANTS = {\n 'CHEST': CHEST_ENCHANTS,\n 'BACK': CLOAK_ENCHANTS,\n 'WRIST': BRACER_ENCHANTS,\n 'LEGS': LEG_ENCHANTS,\n 'FEET': FEET_ENCHANTS,\n 'FINGER_1': RING_ENCHANTS,\n 'FINGER_2': RING_ENCHANTS,\n 'MAIN_HAND': WEAPON_ENCHANTS,\n 'OFF_HAND': WEAPON_ENCHANTS\n }\n\n CLASS_INFO = {\n 'Death Knight': ('plate', 'dreadful'),\n 'Demon Hunter': ('leather', 'dreadful'),\n 'Evoker': ('mail', 'zenith'),\n 'Druid': ('leather', 'mystic'),\n 'Hunter': ('mail', 'mystic'),\n 'Mage': ('cloth', 'mystic'),\n 'Monk': ('leather', 'zenith'),\n 'Paladin': ('plate', 'venerated'),\n 'Priest': ('cloth', 'venerated'),\n 'Rogue': ('leather', 'zenith'),\n 'Shaman': ('mail', 'venerated'),\n 'Warlock': ('cloth', 'dreadful'),\n 'Warrior': ('plate', 'zenith')\n }\n\n def load(self, realm, frealm, toonlist, data, groupstats):\n\n classes = ClassEntry.get_mapping()\n oauth_headers = get_oauth_headers()\n\n session = FuturesSession(max_workers=10)\n futures = []\n\n # Request all of the toon data from the blizzard API and determine the\n # group's ilvls, armor type counts and token type counts. subs are not\n # included in the counts, since they're not really part of the main\n # group. The Blizzard API has a limit of 100 calls per second. Keep a\n # count and if we hit 100 calls, we'll wait a half second before\n # continuing. 
If someone has more than 100 toons in their list, they\n # should be slapped.\n toon_count = 0\n for toon in toonlist:\n toonname = toon.name\n toonrealm = toon.realm\n if toonrealm == realm:\n toonfrealm = frealm\n else:\n toonfrealm = Realm.query_realm(toonrealm)\n\n # TODO: this object can probably be a class instead of another dict\n newdata = dict()\n data.append(newdata)\n\n # a realm is received in the json data from the API, but we need to\n # pass the normalized value to the next stages. ignore this field\n # from the data.\n newdata['toonrealm'] = toonrealm\n newdata['toonfrealm'] = toonfrealm\n newdata['status'] = toon.status\n newdata['role'] = toon.role\n\n quoted_name = urllib.parse.quote(toonname.encode('utf-8').lower())\n url = f'https://us.api.blizzard.com/profile/wow/character/{toonrealm}/{quoted_name}?namespace=profile-us&locale=en_US'\n\n # create the rpc object for the fetch method. the deadline\n # defaults to 5 seconds, but that seems to be too short for the\n # Blizzard API site sometimes. setting it to 10 helps a little\n # but it makes page loads a little slower.\n future = session.get(url, headers=oauth_headers)\n future.toonname = toonname\n future.toondata = newdata\n futures.append(future)\n\n # This really shouldn't happen, but pause a half-second every\n # hundred toons so that we don't blow through the API quota.\n toon_count = toon_count + 1\n if toon_count > 100:\n time.sleep(0.5)\n toon_count = 0\n\n # Now that all of the RPC calls have been created, loop through the data\n # dictionary one more time and wait for each fetch to be completed. Once\n # all of the waits finish, then we have all of the data from the\n # Blizzard API and can loop through all of it and build the page.\n start = time.time()\n for future in as_completed(futures):\n resp = future.result()\n self.handle_result(resp, future.toonname, future.toondata,\n groupstats, classes)\n end = time.time()\n logging.info(f\"Time spent retrieving data: {end-start} seconds\")\n\n # Callback that handles the result of the call to the Blizzard API. This will fill in\n # the toondata dict for the requested toon with either data from Battle.net or with an\n # error message to display on the page.\n def handle_result(self, response, name, toondata, groupstats, classes):\n\n toondata['name'] = name\n toondata['load_status'] = 'ok'\n\n # change the json from the response into a dict of data.\n try:\n jsondata = response.json()\n except Exception as e:\n toondata['load_status'] = 'nok'\n toondata['reason'] = 'Failed to parse data from Blizzard. Refresh page to try again.'\n logging.exception('Failed to parse response as json: %s' % response.content)\n return\n\n # Catch HTTP errors from Blizzard. 
404s really wreck everything.\n        if not self.check_response_status(response, jsondata, 'profile', toondata):\n            return\n\n        # store off some of the fields that we care about directly\n        toondata['guild'] = jsondata.get('guild', {})\n        toondata['realm'] = jsondata['realm']\n        toondata['character_class'] = jsondata['character_class']\n        toondata['name'] = jsondata['name']\n        toondata['average_item_level'] = jsondata['average_item_level']\n        toondata['equipped_item_level'] = jsondata['equipped_item_level']\n        toondata['covenant'] = jsondata.get('covenant_progress',{}).get('chosen_covenant',{}).get('name','None')\n\n        logging.info(\"got good results for %s\" % name.encode('ascii', 'ignore'))\n\n        # For each toon, update the statistics for the group as a whole\n        if toondata['status'] == 'main':\n            groupstats['ilvlmains'] += 1\n            groupstats['totalilvl'] += jsondata['average_item_level']\n            groupstats['totalilvleq'] += jsondata['equipped_item_level']\n\n            toonclass = jsondata['character_class']['name']\n            # guard the lookup so an unexpected class name doesn't raise an\n            # IndexError when indexing into the empty-tuple default\n            class_info = Importer.CLASS_INFO.get(toonclass)\n            if class_info:\n                groupstats[class_info[0]] += 1\n                groupstats[class_info[1]] += 1\n\n            if toondata['role'] == 'dps':\n                groupstats['melee'] += 1\n            elif toondata['role'] == 'ranged':\n                groupstats['ranged'] += 1\n            elif toondata['role'] == 'tank':\n                groupstats['tanks'] += 1\n            elif toondata['role'] == 'healer':\n                groupstats['healers'] += 1\n\n        # We're also going to need the equipment for this character so make a second request\n        oauth_headers = get_oauth_headers()\n        try:\n            equip_res = requests.get(f\"{jsondata['equipment']['href']}&locale=en_US\",\n                                     headers=oauth_headers)\n        except Exception as e:\n            self.handle_request_exception(e, 'equipment', toondata)\n            return\n\n        # change the json from the response into a dict of data.\n        jsondata = equip_res.json()\n\n        # Catch HTTP errors from Blizzard. 404s really wreck everything.\n        if not self.check_response_status(equip_res, jsondata, 'equipment', toondata):\n            return\n\n        toondata['equipped_items'] = jsondata.get('equipped_items', [])\n\n        # Group all gems together into a comma-separated list for tooltipParams\n        for item in toondata['equipped_items']:\n            if not isinstance(item, dict):\n                continue\n\n            item['tooltips'] = {}\n            if 'sockets' in item:\n                gems = []\n                for socket in item['sockets']:\n                    gems.append(socket.get('item', {}).get('id', 0))\n\n                if gems:\n                    item['tooltips']['gems'] = ':'.join(str(x) for x in gems)\n\n            # Default enchant checking to -1 for all items\n            item['enchant'] = -1\n\n            slot = item['slot']['type']\n            if slot in Importer.ENCHANTS:\n                if slot != 'OFF_HAND' or 'weapon' in item:\n                    item['enchant'] = 0\n                    for enchant in item.get('enchantments', []):\n\n                        # Skip non-permanent enchants\n                        if enchant.get('enchantment_slot', {}).get('id', -1) != 0:\n                            continue\n\n                        enchant_id = enchant.get('enchantment_id', 0)\n                        item['tooltips']['enchant'] = enchant_id\n                        if enchant_id in Importer.ENCHANTS[slot] and item['enchant'] < 2:\n                            item['enchant'] = 2\n                        elif enchant_id != 0 and item['enchant'] < 1:\n                            item['enchant'] = 1\n\n    # Handles exceptions from requests to the API in a common fashion\n    def handle_request_exception(self, exception, where, toondata):\n        toondata['load_status'] = 'nok'\n        # the toon's name was stored on toondata before the request went out;\n        # the original referenced an undefined local 'name' here\n        name = toondata.get('name', 'unknown')\n\n        if isinstance(exception, requests.Timeout):\n            logging.error('request timed out on toon %s' % name.encode('ascii', 'ignore'))\n            toondata['reason'] = f'Timeout retrieving {where} data from Battle.net for {name}. Refresh page to try again.'\n        elif isinstance(exception, requests.ConnectionError):\n            logging.error('request failed to connect for %s' % name.encode('ascii', 'ignore'))\n            toondata['reason'] = f'Failed to connect to Battle.net when retrieving {where} for toon {name}'\n        else:\n            logging.error('request threw unknown exception on toon %s' % name.encode('ascii', 'ignore'))\n            toondata['reason'] = f'Unknown error retrieving {where} data from Battle.net for toon {name}. Refresh page to try again.'\n\n    # Checks response codes and error messages from the API in a common fashion.\n    def check_response_status(self, response, jsondata, where, toondata):\n        if response.status_code != 200 or ( 'code' in jsondata and 'detail' in jsondata ):\n            code = jsondata.get('code', response.status_code)\n            logging.error('request returned a %d status code on toon %s' % (code, toondata['name'].encode('ascii', 'ignore')))\n            toondata['load_status'] = 'nok'\n            toondata['reason'] = 'Got a %d requesting %s from Battle.net for toon %s. Refresh page to try again.' % (code, where, toondata['name'])\n\n            if 'detail' in jsondata:\n                toondata['reason'] += ' (reason: %s)' % jsondata['detail']\n\n            return False\n\n        return True\n\n\nclass Setup(object):\n    # Loads the list of realms into the datastore from the blizzard API so that\n    # the realm list on the front page gets populated. Also loads the list of\n    # classes into a table on the DB so that we don't have to request it\n    def initdb(self, app):\n        try:\n            oauth_headers = get_oauth_headers()\n            realmcount = self.init_realms(oauth_headers)\n            classcount = self.init_classes(oauth_headers)\n            return [realmcount, classcount]\n        except Exception as e:\n            logging.exception('')\n            return [0, 0]\n\n    def init_realms(self, oauth_headers):\n        # Delete all of the entities out of the realm datastore so fresh\n        # entities can be loaded.\n        query = Realm.query()\n        for row in query.fetch():\n            row.key.delete()\n\n        # retrieve a list of realms from the blizzard API\n        url = 'https://us.api.blizzard.com/data/wow/realm/index?namespace=dynamic-us&locale=en_US&region=us'\n        response = requests.get(url, headers=oauth_headers)\n        if response.status_code == 200:\n            jsondata = response.json()\n        else:\n            jsondata = {'realms': []}\n\n        for realm in jsondata['realms']:\n            new_realm = Realm(realm=realm['name'], slug=realm['slug'],\n                              namespace='Realms', id=realm['slug'])\n            new_realm.put()\n\n        return len(jsondata['realms'])\n\n    def init_classes(self, oauth_headers):\n        # Delete all of the entities out of the class datastore so fresh\n        # entities can be loaded.\n        query = ClassEntry.query()\n        for row in query.fetch():\n            row.key.delete()\n\n        # retrieve a list of classes from the blizzard API\n        url = 'https://us.api.blizzard.com/data/wow/playable-class/index?namespace=static-us&locale=en_US&region=us'\n        response = requests.get(url, headers=oauth_headers)\n        if response.status_code == 200:\n            jsondata = response.json()\n        else:\n            jsondata = {'classes': []}\n\n        for cls in jsondata['classes']:\n            class_entry = ClassEntry(classId=cls['id'], name=cls['name'])\n            class_entry.put()\n\n        return len(jsondata['classes'])\n","sub_path":"wowapi.py","file_name":"wowapi.py","file_ext":"py","file_size_in_byte":19769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
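# Aside (illustrative, not part of either file above): Importer.load in
# wowapi.py fans requests out through a shared FuturesSession, tags each
# future with per-request context, and drains the responses in completion
# order. A self-contained sketch of that pattern; fetch_all and its
# parameters are hypothetical names, not the module's API.
from concurrent.futures import as_completed

from requests_futures.sessions import FuturesSession


def fetch_all(urls, headers=None, max_workers=10):
    session = FuturesSession(max_workers=max_workers)
    futures = []
    for url in urls:
        future = session.get(url, headers=headers)
        future.url = url  # piggyback context on the future, as load() does
        futures.append(future)
    # as_completed yields futures as responses arrive, not submission order
    return {f.url: f.result() for f in as_completed(futures)}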
 +{"seq_id":"350927258","text":"import select\nimport socket\nimport unittest\n\nfrom pyrad.packet import PacketCode\nfrom pyrad.proxy import Proxy\nfrom pyrad.server import Server\nfrom pyrad.server import ServerPacketError\nfrom pyrad.tests.mock import MockClassMethod\nfrom pyrad.tests.mock import MockFd\nfrom pyrad.tests.mock import MockPoll\nfrom pyrad.tests.mock import MockSocket\nfrom pyrad.tests.mock import UnmockClassMethods\n\n\nclass TrivialObject:\n    \"\"\"dummy object\"\"\"\n\n\nclass SocketTests(unittest.TestCase):\n    def setUp(self):\n        self.orgsocket = socket.socket\n        socket.socket = MockSocket\n        self.proxy = Proxy()\n        self.proxy._fdmap = {}\n\n    def tearDown(self):\n        socket.socket = self.orgsocket\n\n    def testProxyFd(self):\n        self.proxy._poll = MockPoll()\n        self.proxy._prepare_sockets()\n        self.assertTrue(isinstance(self.proxy._proxyfd, MockSocket))\n        self.assertEqual(list(self.proxy._fdmap.keys()), [1])\n        self.assertEqual(\n            self.proxy._poll.registry,\n            {1: select.POLLIN | select.POLLPRI | select.POLLERR})\n\n\nclass ProxyPacketHandlingTests(unittest.TestCase):\n    def setUp(self):\n        self.proxy = Proxy()\n        self.proxy.hosts['host'] = TrivialObject()\n        self.proxy.hosts['host'].secret = 'supersecret'\n        self.packet = TrivialObject()\n        self.packet.code = PacketCode.ACCESS_ACCEPT\n        self.packet.source = ('host', 'port')\n\n    def testHandleProxyPacketUnknownHost(self):\n        self.packet.source = ('stranger', 'port')\n        try:\n            self.proxy._handle_proxy_packet(self.packet)\n        except ServerPacketError as e:\n            self.assertTrue('unknown host' in str(e))\n        else:\n            self.fail()\n\n    def testHandleProxyPacketSetsSecret(self):\n        self.proxy._handle_proxy_packet(self.packet)\n        self.assertEqual(self.packet.secret, 'supersecret')\n\n    def testHandleProxyPacketHandlesWrongPacket(self):\n        self.packet.code = PacketCode.ACCESS_REQUEST\n        try:\n            self.proxy._handle_proxy_packet(self.packet)\n        except ServerPacketError as e:\n            self.assertTrue('non-response' in str(e))\n        else:\n            self.fail()\n\n\nclass OtherTests(unittest.TestCase):\n    def setUp(self):\n        self.proxy = Proxy()\n        self.proxy._proxyfd = MockFd()\n\n    def tearDown(self):\n        UnmockClassMethods(Proxy)\n        UnmockClassMethods(Server)\n\n    def testProcessInputNonProxyPort(self):\n        fd = MockFd(fd=111)\n        MockClassMethod(Server, '_process_input')\n        self.proxy._process_input(fd)\n        self.assertEqual(\n            self.proxy.called,\n            [('_process_input', (fd,), {})])\n\n    def testProcessInput(self):\n        MockClassMethod(Proxy, '_grab_packet')\n        MockClassMethod(Proxy, '_handle_proxy_packet')\n        self.proxy._process_input(self.proxy._proxyfd)\n        self.assertEqual(\n            [x[0] for x in self.proxy.called],\n            ['_grab_packet', '_handle_proxy_packet'])\n\n\nif not hasattr(select, 'poll'):\n    del SocketTests\n","sub_path":"pyrad/tests/testProxy.py","file_name":"testProxy.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"117086920","text":"def test():\n    N = int(input())\n    if N == 0:\n        return -1\n\n    d = {}\n    i = 0\n    while len(d) < 10:\n        i += 1\n        t = N*i\n        for c in str(t):\n            if c not in d:\n                d[c] = 1\n    return i*N\n\n\nT = int(input())\n\nfor i in range(T):\n    a = test()\n    print('Case #'+str(i + 1)+': '+(str(a) if a > 0 else 'INSOMNIA'))\n","sub_path":"codes/CodeJamCrawler/16_0_1/apankiv/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"93077444","text":"def primeiras_ocorrencias(pal):\n    # map each character of pal to the index of its first occurrence;\n    # the original built an index list and a never-filled occurrence list,\n    # which raised an IndexError on the first lookup\n    dic = {}\n    for i in range(len(pal)):\n        if pal[i] not in dic:\n            dic[pal[i]] = i\n    return dic\n
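# Example (illustrative): primeiras_ocorrencias('banana') returns
# {'b': 0, 'a': 1, 'n': 2} -- each character mapped to the index of its
# first occurrence in the string.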
","sub_path":"backup/user_324/ch82_2019_06_04_22_42_05_590183.py","file_name":"ch82_2019_06_04_22_42_05_590183.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274207713","text":"from typing import List, Set, Tuple\n\nimport numpy as np\n\n\ndef calc_floodfill(\n occupied: np.ndarray,\n seeds: List[Tuple[int, int]],\n min_dist_check=0,\n max_dist=np.Inf,\n) -> np.ndarray:\n size_x, size_y = occupied.shape\n\n result = np.full_like(occupied, fill_value=np.nan, dtype=float)\n\n dist = 0\n frontiers = set(seeds)\n\n while frontiers:\n frontiers = {\n pos\n for pos in frontiers\n if np.isnan(result[pos]) and (dist < min_dist_check or occupied[pos] == 0)\n }\n\n new_frontiers: Set[Tuple[int, int]] = set()\n for frontier_pos in frontiers:\n result[frontier_pos] = dist\n\n x, y = frontier_pos\n if x + 1 < size_x:\n new_frontiers.add((x + 1, y))\n if y + 1 < size_y:\n new_frontiers.add((x, y + 1))\n if x - 1 >= 0:\n new_frontiers.add((x - 1, y))\n if y - 1 >= 0:\n new_frontiers.add((x, y - 1))\n\n frontiers = new_frontiers\n dist += 1\n\n if dist > max_dist:\n break\n\n return result\n","sub_path":"dstkdev/floodfill.py","file_name":"floodfill.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"389471289","text":"#! /usr/bin/env python3\n\"\"\"NAME:\n Telegram Bot\n\nUSAGE:\n botctl bot [start | stop] [--help]\n\nOPTIONS:\n --help Print this messasge.\n\nCOMMANDS:\n bot start Starts the bot.\n\"\"\"\nfrom docopt import docopt\nfrom bot.services import CommonServices as bot_CommonServices\nfrom bot.services import BotServices as bot_BotServices\n\ndef do_bot_start():\n cs = bot_CommonServices()\n bs = bot_BotServices( common_services = cs )\n bot = bs.bot()\n bot.run()\n\ndef main():\n args = docopt( __doc__ )\n\n if args[ \"bot\" ]:\n if args[ \"start\" ]:\n do_bot_start()\n","sub_path":"src/bot/cli/bot_cmd.py","file_name":"bot_cmd.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"469729181","text":"from __future__ import print_function\nfrom random import random\n\ndef printIntro():\n print(\"Welcome to the raquet ball simulation!\")\n print(\"This program simulates a game of raquetball between two\")\n print(\"players called 'A' and 'B'. The abilities of each player\")\n print(\"is indicated by the probability (a number between 0 and 1)\")\n print(\"that the player wins the point when serving. 
Player A will\")\n print(\"always have the first serve.\")\n\ndef getInputs():\n probA = input(\"Enter the probability for player A of winning a serve: \")\n probB = input(\"Enter the probability for player B of winning a serve: \")\n n = input(\"Enter the number of games to be simualted: \")\n return probA, probB, n\n\ndef score(probA, probB, aScore, bScore, serving):\n if serving == \"A\":\n if random() <= probA:\n aScore = aScore + 1\n serving = \"A\"\n else:\n serving = \"B\"\n elif serving == \"B\":\n if random() <= probB:\n bScore = bScore + 1\n serving = \"B\"\n else:\n serving = \"A\"\n return aScore, bScore, serving\n\ndef game(probA, probB):\n aScore = 0\n bScore = 0\n serving = \"A\"\n step = 0\n while aScore < 15 and bScore < 15:\n aScore, bScore, serving = score(probA, probB, aScore, bScore, serving)\n step = step + 1\n if aScore > bScore:\n winner = \"A\"\n elif bScore > aScore:\n winner = \"B\"\n return winner\n\ndef simulation(probA, probB, n):\n winsA = 0\n winsB = 0\n for i in range(n):\n winner = game(probA, probB)\n if winner == \"A\":\n winsA = winsA + 1\n elif winner == \"B\":\n winsB = winsB + 1\n if winsA > winsB:\n champion = \"A\"\n elif winsB > winsA:\n champion = \"B\"\n return winsA, winsB, champion\n\ndef printOutro(winsA, winsB, champion, n):\n print(\"After\",n,\"games of simulation,\")\n print(\"Wins for A:\",winsA)\n print(\"Wins for B:\",winsB)\n print(\"Champion:\",champion)\n\ndef main():\n printIntro()\n probA, probB, n = getInputs()\n winsA, winsB, champion = simulation(probA, probB, n)\n printOutro(winsA, winsB, champion, n)\n\nmain()\n","sub_path":"learning/python_programming_book/chapter9/raquet_ball/raquet_ball.py","file_name":"raquet_ball.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"189971673","text":"def split_parity(lst):\n left=0\n right=len(lst)-1\n done = False\n while left> The model is running on {}'.format(args.DEVICE))\n \n if args.dataset == 'NTU':\n if args.modality == 'rgb':\n args.dataset_dir = '/xionghui/xionghui/NTULargeScaleRGB-DDataset'\n else:\n args.dataset_dir = '/ssd/xionghui/NTULargeScaleRGB-DDataset'\n args.num_classes = 60\n args.k = 1\n elif args.dataset == 'SYSU':\n args.dataset_dir = '/home/xionghui/dataset/SYSU3DvideoNorm'\n args.num_classes = 12\n elif args.dataset == 'ORGBD':\n args.dataset_dir = '/home/xionghui/dataset/ORGBD'\n args.num_classes = 7\n \n if args.train_type != 'TSN' and args.modality != 'skeleton':\n if args.train_sample_num != args.test_sample_num:\n print('sample numbers must be same in train and test when train method is not TSN')\n raise AssertionError\n \n if args.optim == 'Adam':\n # Adam do not rely much on learning rate decay\n args.patience = 200\n \n \n config_dir = '/home/xionghui/project/action_prediction/myconfs/config_{}.py'\\\n .format(args.c)\n config_0 = '/home/xionghui/project/action_prediction/myconfs/config_0.py'\n if not os.path.exists(config_dir):\n import shutil\n shutil.copy(config_0, config_dir)\n return args","sub_path":"opts.py","file_name":"opts.py","file_ext":"py","file_size_in_byte":12768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"549018437","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\nfrom tkinter import font, colorchooser, filedialog, messagebox\r\nimport os\r\n\r\nmain_application=tk.Tk()\r\nmain_application.geometry('1200x800')\r\nmain_application.title('GT Text 
Editor')\r\n\r\n#***************************************** Main Menu ***********************************************************\r\nmain_menu=tk.Menu()\r\nfile=tk.Menu(main_menu,tearoff=False)\r\nedit=tk.Menu(main_menu,tearoff=False)\r\nview=tk.Menu(main_menu,tearoff=False)\r\ncolor_theme=tk.Menu(main_menu,tearoff=False)\r\ntheme_choice=tk.StringVar()\r\ncolor_dict={\r\n 'Light default':('#000000','#ffffff'),\r\n 'Light Plus':('#474747','#e0e0e0'),\r\n 'Dark':('#c4c4c4','#2d2d2d'),\r\n 'Red':('#2d2d2d','#ff0000'),\r\n 'Monokai':('#d3b774','#474747'),\r\n 'Night Blue':('#ededed','#6b9dc2')\r\n}\r\n\r\n# cascade\r\nmain_menu.add_cascade(label='File',menu=file)\r\nmain_menu.add_cascade(label='Edit',menu=edit)\r\nmain_menu.add_cascade(label='View',menu=view)\r\nmain_menu.add_cascade(label='Color Theme',menu=color_theme)\r\n#_________________________________________ Ending Main Menu ____________________________________________________\r\n\r\n#***************************************** Tool Bar ************************************************************\r\ntool_bar=ttk.Label(main_application)\r\ntool_bar.pack(side=tk.TOP, fill=tk.X)\r\n# font box\r\nfont_tuple=tk.font.families()\r\nfont_family=tk.StringVar()\r\nfont_box=ttk.Combobox(tool_bar,width=30,textvariable=font_family,state='readonly')\r\nfont_box['values']=font_tuple\r\nfont_box.current(font_tuple.index('Arial'))\r\nfont_box.grid(row=0,column=0,padx=5)\r\n# size box\r\nsize_var=tk.IntVar()\r\nfont_size=ttk.Combobox(tool_bar,width=14,textvariable=size_var,state='readonly')\r\nfont_size['values']=tuple(range(8,81,2))\r\nfont_size.current(2)\r\nfont_size.grid(row=0,column=1,padx=5)\r\n#bold button\r\nbold_btn=ttk.Button(tool_bar,text='B')\r\nbold_btn.grid(row=0,column=2,padx=5)\r\n#italic button\r\nitalic_btn=ttk.Button(tool_bar,text='I')\r\nitalic_btn.grid(row=0,column=3,padx=5)\r\n#underline button\r\nunderline_btn=ttk.Button(tool_bar,text='U')\r\nunderline_btn.grid(row=0,column=4,padx=5)\r\n#font color button\r\nfont_color_btn=ttk.Button(tool_bar,text='FC')\r\nfont_color_btn.grid(row=0,column=5,padx=5)\r\n#align buttons\r\nalign_left_btn=ttk.Button(tool_bar,text='LA')\r\nalign_left_btn.grid(row=0,column=6,padx=5)\r\nalign_center_btn=ttk.Button(tool_bar,text='CA')\r\nalign_center_btn.grid(row=0,column=7,padx=5)\r\nalign_right_btn=ttk.Button(tool_bar,text='RA')\r\nalign_right_btn.grid(row=0,column=8,padx=5)\r\n#____________________________________ Ending Tool Bar __________________________________________________________\r\n\r\n#*********************************** Text Editor ***************************************************************\r\ntext_editor=tk.Text(main_application)\r\ntext_editor.config(wrap='word',relief=tk.FLAT)\r\n\r\nscroll_bar=tk.Scrollbar(main_application)\r\ntext_editor.focus_set()\r\nscroll_bar.pack(side=tk.RIGHT,fill=tk.Y)\r\ntext_editor.pack(fill=tk.BOTH,expand=True)\r\nscroll_bar.config(command=text_editor.yview)\r\ntext_editor.config(yscrollcommand=scroll_bar.set)\r\n\r\n# font family and font size functionality\r\ncurrent_font_family='Arial'\r\ncurrent_font_size=12\r\n\r\ndef change_font(main_application):\r\n global current_font_family\r\n current_font_family=font_family.get()\r\n text_editor.configure(font=(current_font_family,current_font_size))\r\n\r\ndef change_fontsize(main_application):\r\n global current_font_size\r\n current_font_size=size_var.get()\r\n 
text_editor.configure(font=(current_font_family,current_font_size))\r\n\r\nfont_box.bind(\"<>\",change_font)\r\nfont_size.bind(\"<>\",change_fontsize)\r\n\r\n# buttons functionality\r\n# bold functionality\r\ndef change_bold():\r\n text_property=tk.font.Font(font=text_editor['font'])\r\n if text_property.actual()['weight']=='normal':\r\n text_editor.configure(font=(current_font_family,current_font_size,'bold'))\r\n if text_property.actual()['weight']=='bold':\r\n text_editor.configure(font=(current_font_family,current_font_size,'normal'))\r\nbold_btn.configure(command=change_bold)\r\n# italic functionality\r\ndef change_italic():\r\n text_property=tk.font.Font(font=text_editor['font'])\r\n if text_property.actual()['slant']=='roman':\r\n text_editor.configure(font=(current_font_family,current_font_size,'italic'))\r\n if text_property.actual()['slant']=='italic':\r\n text_editor.configure(font=(current_font_family,current_font_size,'roman'))\r\nitalic_btn.configure(command=change_italic)\r\n# underline functionality\r\ndef change_underline():\r\n text_property=tk.font.Font(font=text_editor['font'])\r\n if text_property.actual()['underline']==0:\r\n text_editor.configure(font=(current_font_family,current_font_size,'underline'))\r\n if text_property.actual()['underline']==1:\r\n text_editor.configure(font=(current_font_family,current_font_size,'normal'))\r\nunderline_btn.configure(command=change_underline)\r\n# font color functionality\r\ndef change_font_color():\r\n color_var=tk.colorchooser.askcolor()\r\n text_editor.configure(fg=color_var[1])\r\nfont_color_btn.configure(command=change_font_color)\r\n# alignment\r\ndef align_left():\r\n text_content=text_editor.get(1.0,'end')\r\n text_editor.tag_config('left',justify=tk.LEFT)\r\n text_editor.delete(1.0,tk.END)\r\n text_editor.insert(tk.INSERT,text_content,'left')\r\nalign_left_btn.configure(command=align_left)\r\ndef align_center():\r\n text_content=text_editor.get(1.0,'end')\r\n text_editor.tag_config('center',justify=tk.CENTER)\r\n text_editor.delete(1.0,tk.END)\r\n text_editor.insert(tk.INSERT,text_content,'center')\r\nalign_center_btn.configure(command=align_center)\r\ndef align_right():\r\n text_content=text_editor.get(1.0,'end')\r\n text_editor.tag_config('right',justify=tk.RIGHT)\r\n text_editor.delete(1.0,tk.END)\r\n text_editor.insert(tk.INSERT,text_content,'right')\r\nalign_right_btn.configure(command=align_right)\r\ntext_editor.configure(font=('Arial',12))\r\n#______________________________ Ending Text Editor______________________________________________________________\r\n\r\n#********************************** Status Bar*****************************************************************\r\nstatus_bar=ttk.Label(main_application,text='Status Bar')\r\nstatus_bar.pack(side=tk.BOTTOM)\r\n\r\ntext_changed=False\r\ndef changed(event=None):\r\n global text_changed\r\n if text_editor.edit_modified():\r\n text_changed=True\r\n words=len(text_editor.get(1.0,'end-1c').split())\r\n characters=len(text_editor.get(1.0,'end-1c'))\r\n status_bar.configure(text=f'characters:{characters} words:{words}')\r\n text_editor.edit_modified(False)\r\n\r\ntext_editor.bind('<>',changed)\r\n#______________________________ Ending Status Bar_____________________________________________________________\r\n\r\n#************************************ Main Menu Functionality***************************************************\r\nurl=''\r\n#new functionality\r\ndef new_file(event=None):\r\n global url\r\n url=''\r\n text_editor.delete(1.0,tk.END)\r\n\r\n# file 
commands\r\nfile.add_command(label='New',accelerator='Ctrl+N',command=new_file)\r\n\r\n#open functionality\r\ndef open_file(event=None):\r\n global url\r\n url=filedialog.askopenfilename(initialdir=os.getcwd(),title='Select File',filetypes=(('Text File','*.txt'),('All files','*.*')))\r\n try:\r\n with open(url,'r') as fr:\r\n text_editor.delete(1.0,tk.END)\r\n text_editor.insert(1.0,fr.read())\r\n except FileNotFoundError:\r\n return\r\n except:\r\n return\r\n main_application.title(os.path.basename(url))\r\nfile.add_command(label='Open',accelerator='Ctrl+O',command=open_file)\r\n\r\n# Save functionality\r\ndef save_file(event=None):\r\n global url\r\n try:\r\n if url:\r\n content=str(text_editor.get(1.0,tk.END))\r\n with open(url,'w',encoding='utf-8') as fw:\r\n fw.write(content)\r\n else:\r\n url=filedialog.asksaveasfile(mode='w',defaultextension='.txt',filetypes=(('Text File','*.txt'),('All files','*.*')))\r\n content2=text_editor.get(1.0,tk.END)\r\n url.write(content2)\r\n url.close()\r\n except:\r\n return\r\nfile.add_command(label='Save',accelerator='Ctrl+S',command=save_file)\r\n# Save As functionality\r\ndef save_as(event=None):\r\n global url\r\n try:\r\n content=text_editor.get(1.0,tk.END)\r\n url=filedialog.asksaveasfile(mode='w',defaultextension='.txt',filetypes=(('Text File','*.txt'),('All files','*.*')))\r\n url.write(content)\r\n url.close()\r\n except:\r\n return\r\nfile.add_command(label='Save As',accelerator='Ctrl+Alt+S',command=save_as)\r\n# Exit Functionality\r\ndef exit_func(event=None):\r\n global url,text_changed\r\n try:\r\n if text_changed:\r\n mbox=messagebox.askyesnocancel('Warning','Do you want to save the file')\r\n if mbox is True:\r\n if url:\r\n content=text_editor.get(1.0,tk.END)\r\n with open(url,'w',encoding='utf-8') as fw:\r\n fw.write(content)\r\n main_application.destroy()\r\n else:\r\n content2=str(text_editor.get(1.0,tk.END))\r\n url=filedialog.asksaveasfile(mode='w',defaultextension='.txt',filetypes=(('Text File','*.txt'),('All files','*.*')))\r\n url.write(content2)\r\n url.close\r\n main_application.destroy()\r\n elif mbox is False:\r\n main_application.destroy()\r\n else:\r\n main_application.destroy()\r\n except:\r\n return\r\nfile.add_command(label='Exit',accelerator='Ctrl+Q',command=exit_func)\r\n\r\n# edit commands\r\n# find functionality\r\ndef find_func(event=None):\r\n def find():\r\n word=find_input.get()\r\n text_editor.tag_remove('match','1.0',tk.END)\r\n matches=0\r\n if word:\r\n start_pos='1.0'\r\n while True:\r\n start_pos=text_editor.search(word,start_pos,stopindex=tk.END)\r\n if not start_pos:\r\n break\r\n end_pos=f'{start_pos}+{len(word)}c'\r\n text_editor.tag_add('match',start_pos,end_pos)\r\n matches+=1\r\n start_pos=end_pos\r\n text_editor.tag_config('match',foreground='red',background='yellow')\r\n\r\n def replace():\r\n word=find_input.get()\r\n replace_text=replace_input.get()\r\n content=text_editor.get(1.0,tk.END)\r\n new_content=content.replace(word,replace_text)\r\n text_editor.delete(1.0,tk.END)\r\n text_editor.insert(1.0,new_content)\r\n\r\n find_dialogue=tk.Toplevel()\r\n find_dialogue.geometry('450x250+500+200')\r\n find_dialogue.title('Find')\r\n find_dialogue.resizable(0,0)\r\n\r\n ## frame\r\n find_frame=ttk.LabelFrame(find_dialogue,text='Find/Replace')\r\n find_frame.pack(pady=20)\r\n\r\n ## labels\r\n text_find_label=ttk.Label(find_frame,text='Find:')\r\n text_replace_label=ttk.Label(find_frame,text='Replace:')\r\n\r\n ## entry\r\n find_input=ttk.Entry(find_frame,width=30)\r\n 
replace_input=ttk.Entry(find_frame,width=30)\r\n\r\n ## button\r\n find_button=ttk.Button(find_frame,text='Find',command=find)\r\n replace_button=ttk.Button(find_frame,text='Replace',command=replace)\r\n\r\n ## label grid\r\n text_find_label.grid(row=0,column=0,padx=4,pady=4)\r\n text_replace_label.grid(row=1,column=0,padx=4,pady=4)\r\n\r\n ## entry grid\r\n find_input.grid(row=0,column=1,padx=4,pady=4)\r\n replace_input.grid(row=1,column=1,padx=4,pady=4)\r\n\r\n ## button grid\r\n find_button.grid(row=2,column=0,padx=8,pady=4)\r\n replace_button.grid(row=2,column=1,padx=8,pady=4)\r\n\r\n find_dialogue.mainloop()\r\n\r\nedit.add_command(label='Copy',accelerator='Ctrl+C',command=lambda:text_editor.event_generate(\"\"))\r\nedit.add_command(label='Paste',accelerator='Ctrl+V',command=lambda:text_editor.event_generate(\"\"))\r\nedit.add_command(label='Cut',accelerator='Ctrl+X',command=lambda:text_editor.event_generate(\"\"))\r\nedit.add_command(label='Clear All',accelerator='Ctrl+ALt+X',command=lambda:text_editor.delete(1.0,tk.END))\r\nedit.add_command(label='Find',accelerator='Ctrl+F',command=find_func)\r\n\r\n# view commands\r\nshow_toolbar=tk.BooleanVar()\r\nshow_toolbar.set(True)\r\nshow_statusbar=tk.BooleanVar()\r\nshow_statusbar.set(True)\r\n\r\ndef hide_toolbar():\r\n global show_toolbar\r\n if show_toolbar:\r\n tool_bar.pack_forget()\r\n show_toolbar=False\r\n else:\r\n text_editor.pack_forget()\r\n status_bar.pack_forget()\r\n tool_bar.pack(side=tk.TOP,fill=tk.X)\r\n text_editor.pack(fill=tk.BOTH,expand=True)\r\n status_bar.pack(side=tk.BOTTOM)\r\n show_toolbar=True\r\ndef hide_statusbar():\r\n global show_statusbar\r\n if show_statusbar:\r\n status_bar.pack_forget()\r\n show_statusbar=False\r\n else:\r\n status_bar.pack(side=tk.BOTTOM)\r\n show_statusbar=True\r\nview.add_checkbutton(label='Tool Bar',onvalue=1,offvalue=0,variable=show_toolbar,command=hide_toolbar)\r\nview.add_checkbutton(label='Status Bar',onvalue=1,offvalue=0,variable=show_statusbar,command=hide_statusbar)\r\n\r\n# color theme commands\r\n\r\ndef change_theme():\r\n chosen_theme=theme_choice.get()\r\n color_tuple=color_dict.get(chosen_theme)\r\n text_editor.config(bg=color_tuple[1],fg=color_tuple[0])\r\ncount=0\r\nfor i in color_dict:\r\n color_theme.add_radiobutton(label=i, variable=theme_choice,command=change_theme)\r\n count+=1\r\n\r\n# ________________________________ Ending Main Menu Functionality_______________________________________________ \r\n\r\nmain_application.config(menu=main_menu)\r\n#binding shortcut keys\r\nmain_application.bind('',new_file)\r\nmain_application.bind('',open_file)\r\nmain_application.bind('',save_file)\r\nmain_application.bind('',save_as)\r\nmain_application.bind('',exit_func)\r\nmain_application.bind('',find_func)\r\n\r\nmain_application.mainloop()\r\n","sub_path":"gt_text_editor.py","file_name":"gt_text_editor.py","file_ext":"py","file_size_in_byte":15335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90246081","text":"import os\nimport requests\n\nfrom google.cloud import storage\n# from google.cloud.storage import Blob\n\n'''\n# loading .csv to gcp\ncan be loaded as flatfiles or streaming\n\n# requirements\npermission: must be dataOwner (https://cloud.google.com/bigquery/docs/access-control)\nencoding: utf-8\ndatetime: yyyy-mm-dd (iso-8601)\n\n# add more\n- exception: if exists\n\n''' \n\n\ndef initClient():\n '''Initializes bucket. 
Pulls credentials from $PATH'''\n    return storage.Client()\n\nacme = initClient()\nbucket = acme.get_bucket(\"acme-pos-bucket\")\nacme_obj = bucket.blob('kev.csv')\npath = \"/Users/Kev/Desktop/data/kev.csv\"\nacme_obj.upload_from_filename(path)\n\ndef firstRun():\n    # blank params as it pulls project info & credentials from $PATH\n    acme = storage.Client()\n    # bucket name\n    acme_bucket = 'acme-pos-bucket'\n    # instantiates bucket (acme-pos-bucket)\n    bucket = acme.create_bucket(acme_bucket)\n\ndef start():\n    acme = storage.Client()\n    acme_bucket = storage.Bucket(client=acme,name='acme-pos-bucket')\n\n\ndef stepOne_ingest(dir):\n    filename_list = os.listdir(dir) #['trans_fact_8.csv', 'trans_fact_9.csv'...n]\n    abspath_list = []\n    for obj in filename_list:\n        abspath_list.append((obj,os.path.join(dir, obj))) # tuple: (filename,abspath)\n\n    # obj[0],obj[1] = filename,abspath\n\n    for obj in abspath_list:\n        # use the module-level bucket handle and its blob() factory; the\n        # original referenced an undefined acme_bucket and a Blob attribute\n        acme_obj = bucket.blob(obj[0])\n\n        # exists() is a method and must be called, otherwise the check is\n        # always truthy and nothing ever uploads\n        if not acme_obj.exists():\n            with open(obj[1],'rb') as my_file:\n                acme_obj.upload_from_file(my_file)\n\n# if __name__ == '__main__':\n#     main()\n\n\n\n","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"542830989","text":"\"\"\"\nClass for Card objects\nNo need for doctests in this system because it is only a constructor\n\"\"\"\n__author__ = 'Reece'\n\n\nclass Card:\n    \"\"\"\n    _messages:\n        2d list, each list in _messages contains the\n        type of event in index 0 and the message for\n        the event at index 1\n        The first element is for 9pm, the second for\n        10pm and the third for 11pm\n        All times will always have an event type and\n        a message\n        If the type is None, only the message is\n        displayed, no effect or zombies are triggered\n        e.g.:\n        _messages = [\n            [None, 'You try hard not to wet yourself']\n            ,['Item', None]\n            ,['Zombies', '6']\n        ]\n\n    _health_effects:\n        List containing health effects at the time intervals\n        The first element is for 9pm, the second for\n        10pm and the third for 11pm\n        e.g.:\n        [None, -1, None]\n\n    _item:\n        A reference to the item that the card contains\n    \"\"\"\n    def __init__(self, messages, health_effects, item):\n        self._messages = messages\n        self._health_effects = health_effects\n        self._item = item\n","sub_path":"model/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"69257174","text":"from django.test import TestCase\nfrom django.db.utils import IntegrityError\nfrom django.core.exceptions import ValidationError\nfrom forum.models import UserData, Post, Topic\nfrom django.contrib.auth.models import User\nimport datetime\n\nclass PostModelTests(TestCase):\n    def setUp(self):\n        super(PostModelTests, self).setUp()\n        # create_user's second positional argument is the email address,\n        # so pass the password by keyword\n        self.user1 = User.objects.create_user('user1', password='password')\n        self.topic = Topic.objects.create(title='test topic')\n        self.userdata1 = UserData.objects.create(user=self.user1)\n\n\n    def tearDown(self):\n        self.user1 = None\n        self.topic = None\n        self.userdata1 = None\n        super(PostModelTests, self).tearDown()\n\n\n    def test_min_title_length(self):\n        \"\"\" Posts should have a minimum title length of 5 \"\"\"\n        title = 'abc'\n        with self.assertRaises(ValueError):\n            Post.objects.create(title=title, content='Some content',\n                    topic=self.topic, posted_by=self.userdata1)\n\n    def test_max_title_length(self):\n        \"\"\" Posts should have a maximum title length of 50 \"\"\"\n        
title = 'a' * 51\n        with self.assertRaises(ValueError):\n            Post.objects.create(title=title, content='content',\n                    topic=self.topic, posted_by=self.userdata1)\n\n    def test_min_slug_length(self):\n        \"\"\" Posts' slugs should have a minimum length of 5 \"\"\"\n        title = 'a a' # a title of len > 5 which has len(slug) < 5\n        with self.assertRaises(ValueError):\n            Post.objects.create(title=title, content='content',\n                    topic=self.topic, posted_by=self.userdata1)\n\n    def test_no_duplicate_slugs_on_date(self):\n        \"\"\" There should be no two posts with the same slug on the same date \"\"\"\n        title = 'test post'\n        date = datetime.date(2016, 12, 25)\n        Post.objects.create(title=title, content='content', topic=self.topic,\n                posted_by=self.userdata1, post_date=date)\n        with self.assertRaises(ValidationError):\n            Post.objects.create(title=title, content='content', topic=self.topic,\n                    posted_by=self.userdata1, post_date=date)\n","sub_path":"forum/tests/test_post_model.py","file_name":"test_post_model.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"616825458","text":"\"\"\"\nSiEPIC Photonics Package\n\nAuthor: Mustafa Hammood\n    Mustafa@siepic.com\n    \n    https://github.com/SiEPIC-Kits/SiEPIC_Photonics_Package\n\nModule: PCM Analysis \n\nFetches measurement data of a manufactured chip, analyzes the process control monitor (PCM) structures to assess\nthe quality of the fabricated chip.\n\"\"\"\n#%% import package and installed dependent packages\nimport sys, os\n# go up two directories\ndir_path = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(os.path.dirname(dir_path)))\n\nimport SiEPIC_Photonics_Package as SiEPIC_PP\nfrom SiEPIC_Photonics_Package.setup import *\nimport matplotlib.pyplot as plt\nimport requests, zipfile, matplotlib\n\n#%%\ndef PCM_analysis( URL, pol, download = True, PORT = 1 ):\n    # create new directory for storing downloaded PCM data, download and unzip all the data\n    path = 'download'+pol+'/'\n    file_name = 'experimental_data'+pol+'.zip'\n\n    if download == True:\n        # create the directory before changing into it; the original called\n        # os.chdir(path) first, which fails on a fresh run\n        try: \n            os.mkdir(path)\n        except OSError: \n            print (\"**ERROR**: Creation of the directory %s failed, remove existing directory\" % path)\n        else: \n            print (\"Successfully created the directory %s \" % path)\n\n    os.chdir(path)\n\n    if download == True:\n        print (\"Downloading data. This may take a while. . . \")\n        r = requests.get(URL,allow_redirects=True)\n        with open(file_name, 'wb') as f:\n            f.write(r.content)\n        with zipfile.ZipFile(file_name,\"r\") as zip_ref:\n            zip_ref.extractall()\n        print (\"Experimental data download and unzip complete. . . \")\n\n    # iterate through all data, remove all non PCM data, and remove all .pdf data\n    for filename in os.listdir(os.getcwd()):\n        if filename.endswith(\".pdf\"): \n            os.remove(filename)\n            continue\n        if filename.startswith(\"PCM_\") == False:\n            os.remove(filename)\n    print (\"Data clean up complete, remove all .pdf and non PCM data. . 
.\\n\")\n \n # run all\n WGloss_straight(pol, PORT)\n WGloss_spiral(pol, PORT)\n \n # PCM structures that are only available to TE measurements\n if pol == 'TE':\n WGloss_SWG(PORT)\n #Bragg_sweep(PORT)\n #contraDC(PORT)\n contraDCloss(PORT)\n \n#%%\n# analyze the losses of straight waveguides by cutback method\ndef WGloss_straight(pol, PORT):\n # PCM structure ID\n file_ID = 'PCM_PCM_StraightWGloss'\n \n # PCM structure lengths\n if pol == 'TE':\n length = [7418, 14618, 21818, 29018]\n if pol == 'TM':\n length = [10000, 17200, 24400, 31600]\n \n # divide by 10000 to see result in dB/cm\n length_cm = [i/10000 for i in length]\n \n input_data_response = []\n for i in length:\n for filename in os.listdir(os.getcwd()):\n if filename.startswith(file_ID+str(i)+pol) == True:\n print(filename)\n input_data_response.append( SiEPIC_PP.core.parse_response(filename,PORT) )\n \n #%% apply SiEPIC_PP cutback extraction function and plot\n [insertion_loss_wavelength, insertion_loss_fit, insertion_loss_raw] = SiEPIC_PP.core.cutback( input_data_response, length_cm, 1550e-9 )\n\n # plot all cutback structures responses\n plt.figure(0)\n wavelength = input_data_response[0][0]*1e9\n for i in enumerate(length):\n fig0 = plt.plot(wavelength,input_data_response[i[0]][1], label = 'L = '+str(length[i[0]])+' um')\n plt.legend(loc=0)\n plt.ylabel('Power (dBm)', color = 'black')\n plt.xlabel('Wavelength (nm)', color = 'black')\n plt.xlim(round(min(wavelength)),round(max(wavelength)))\n plt.title(\"Raw measurement of cutback structures (Straight waveguides)\")\n plt.savefig('WGloss_straight_'+pol+'.pdf')\n matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n \n # Insertion loss vs wavelength plot\n plt.figure(1)\n linspace = numpy.linspace(wavelength[0],wavelength[len(wavelength)-1], len(insertion_loss_fit))\n fig1 = plt.plot(linspace,insertion_loss_raw, label='Insertion loss (raw)', color='blue')\n fig2 = plt.plot(linspace,insertion_loss_fit, label='Insertion loss (fit)', color='red')\n plt.legend(loc=0)\n plt.ylabel('Loss (dB/cm)', color = 'black')\n plt.xlabel('Wavelength (nm)', color = 'black')\n plt.setp(fig2, 'linewidth', 4.0)\n plt.xlim(round(min(linspace)),round(max(linspace)))\n plt.title(\"Insertion losses using the cut-back method (Straight waveguides)\")\n plt.savefig('WGloss_straight_fit_'+pol+'.pdf')\n matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n\n return\n\n# analyze the losses of spiral waveguides by cutback method\ndef WGloss_spiral(pol, PORT):\n # PCM structure ID\n file_ID = 'PCM_PCM_SpiralWG'\n \n # PCM structure lengths\n \n length = [0, 5733, 9429, 20613]\n \n # divide by 10000 to see result in dB/cm\n length_cm = [i/10000 for i in length]\n \n input_data_response = []\n for i in length:\n for filename in os.listdir(os.getcwd()):\n if filename.startswith(file_ID+str(i)+pol) == True:\n print(filename)\n input_data_response.append( SiEPIC_PP.core.parse_response(filename,PORT) )\n \n #%% apply SiEPIC_PP cutback extraction function and plot\n [insertion_loss_wavelength, insertion_loss_fit, insertion_loss_raw] = SiEPIC_PP.core.cutback( input_data_response, length_cm, 1550e-9 )\n\n # plot all cutback structures responses\n plt.figure(2)\n wavelength = input_data_response[0][0]*1e9\n for i in enumerate(length):\n fig1 = plt.plot(wavelength,input_data_response[i[0]][1], label = 'L = '+str(length[i[0]])+' um')\n plt.legend(loc=0)\n plt.ylabel('Power (dBm)', color = 'black')\n 
plt.xlabel('Wavelength (nm)', color = 'black')\n plt.xlim(round(min(wavelength)),round(max(wavelength)))\n plt.title(\"Raw measurement of cutback structures (Spiral waveguides)\")\n plt.savefig('WGloss_spiral_'+pol+'.pdf')\n matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n \n # Insertion loss vs wavelength plot\n plt.figure(3)\n linspace = numpy.linspace(wavelength[0],wavelength[len(wavelength)-1], len(insertion_loss_fit))\n fig1 = plt.plot(linspace,insertion_loss_raw, label='Insertion loss (raw)', color='blue')\n fig2 = plt.plot(linspace,insertion_loss_fit, label='Insertion loss (fit)', color='red')\n plt.legend(loc=0)\n plt.ylabel('Loss (dB/cm)', color = 'black')\n plt.xlabel('Wavelength (nm)', color = 'black')\n plt.setp(fig2, 'linewidth', 4.0)\n plt.xlim(round(min(linspace)),round(max(linspace)))\n plt.title(\"Insertion losses using the cut-back method (Spiral waveguides)\")\n plt.savefig('WGloss_spiral_fit_'+pol+'.pdf')\n matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n\n return\n\n# analyze the losses of sub-wavelength waveguides by cutback method\ndef WGloss_SWG(PORT):\n # PCM structure ID\n file_ID = 'PCM_SWG'\n \n # PCM structure lengths\n length = [0, 800, 1600, 4000, 9600 ]\n \n # divide by 10000 to see result in dB/cm\n length_cm = [i/10000 for i in length]\n \n input_data_response = []\n for i in length:\n for filename in os.listdir(os.getcwd()):\n if filename.startswith(file_ID+str(i)) == True:\n print(filename)\n input_data_response.append( SiEPIC_PP.core.parse_response(filename,PORT) )\n \n #%% apply SiEPIC_PP cutback extraction function and plot\n [insertion_loss_wavelength, insertion_loss_fit, insertion_loss_raw] = SiEPIC_PP.core.cutback( input_data_response, length_cm, 1550e-9 )\n\n # plot all cutback structures responses\n plt.figure(4)\n wavelength = input_data_response[0][0]*1e9\n for i in enumerate(length):\n fig2 = plt.plot(wavelength,input_data_response[i[0]][1], label = 'L = '+str(length[i[0]])+' um')\n plt.legend(loc=0)\n plt.ylabel('Power (dBm)', color = 'black')\n plt.xlabel('Wavelength (nm)', color = 'black')\n plt.xlim(round(min(wavelength)),round(max(wavelength)))\n plt.title(\"Raw measurement of cutback structures (SWG waveguides)\")\n plt.savefig('WGloss_SWG'+'.pdf')\n matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n \n # Insertion loss vs wavelength plot\n plt.figure(5)\n linspace = numpy.linspace(wavelength[0],wavelength[len(wavelength)-1], len(insertion_loss_fit))\n fig1 = plt.plot(linspace,insertion_loss_raw, label='Insertion loss (raw)', color='blue')\n fig2 = plt.plot(linspace,insertion_loss_fit, label='Insertion loss (fit)', color='red')\n plt.legend(loc=0)\n plt.ylabel('Loss (dB/cm)', color = 'black')\n plt.xlabel('Wavelength (nm)', color = 'black')\n plt.setp(fig2, 'linewidth', 4.0)\n plt.xlim(round(min(linspace)),round(max(linspace)))\n plt.title(\"Insertion losses using the cut-back method (SWG waveguides)\")\n plt.savefig('WGloss_SWG_fit'+'.pdf')\n matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n\n return\n\n# analyze the bandwidth and central wavelength of Bragg gratings as a function of corrugation strength\ndef Bragg_sweep(PORT):\n # PCM structure ID\n file_ID = 'PCM_PCMBraggDW'\n \n # PCM structure lengths\n sweep = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150]\n \n \n return\n\n# analyze the 
spectrum of a contra-directional coupler, observe sidelobes and self-reflection\ndef contraDC():\n return\n\n#%% analyze the losses of contra-directional coupler (drop port) by cutback method\ndef contraDCloss(PORT):\n # PCM structure ID\n file_ID = 'PCM_PCMcontraDCcascaded'\n \n # PCM structure lengths\n length = [3, 5, 8, 11]\n \n if PORT == 1:\n PORT_REF = 2\n else:\n PORT_REF = 1\n \n # import the data\n input_data_response = []\n ref_data_response = []\n calibrated_data_response = []\n for idx, val in enumerate(length):\n for filename in os.listdir(os.getcwd()):\n if filename.startswith(file_ID+str(val)) == True:\n print(filename)\n input_data_response.append( SiEPIC_PP.core.parse_response(filename,PORT) )\n ref_data_response.append( SiEPIC_PP.core.parse_response(filename,PORT_REF) )\n # Calibrate the data with respect to the throguh port using SiEPIC PP calibrate envelope function\n [power_corrected,power_calib_fit] = SiEPIC_PP.core.calibrate_envelope( input_data_response[idx], ref_data_response[idx] )\n calibrated_data_response.append(power_corrected)\n\n #%% plot all cutback structures responses\n plt.figure(6)\n wavelength = input_data_response[0][0]*1e9\n for i in enumerate(length):\n fig = plt.plot(wavelength,calibrated_data_response[i[0]], label = 'Stages = '+str(length[i[0]]))\n plt.legend(loc=0)\n plt.ylabel('Power (dBm)', color = 'black')\n plt.xlabel('Wavelength (nm)', color = 'black')\n plt.xlim(round(min(wavelength)),round(max(wavelength)))\n plt.title(\"Raw measurement of contra-directional couplers (drop port)\")\n plt.savefig('WGloss_SWG'+'.pdf')\n matplotlib.rcParams.update({'font.size': 14, 'font.family' : 'Times New Roman', 'font.weight': 'bold'})\n \n return\n\n#%% measurement URL and polarization (ensure dl=1 in case of Dropbox)\nURL = 'https://www.dropbox.com/sh/hqycb5zuekuncoc/AABCY99C70yDGQjhVOnLgo3Ma?dl=1'\npol = 'TE'\n\n# Port either 0 or 1, if data makes no sense, switch.\nPCM_analysis(URL, pol, download = False, PORT = 0)","sub_path":"SiEPIC_Photonics_Package/PCM_analysis/PCM_analysis.py","file_name":"PCM_analysis.py","file_ext":"py","file_size_in_byte":11653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"322082638","text":"def arrays_44_sortA(list = []):\n list_ord = []\n list_pair = []\n for item in list:\n if item % 2 == 0:\n list_pair.append(item)\n else:\n list_ord.append(item)\n list_ord = sorted(list_ord,reverse=True)\n list_pair =sorted(list_pair)\n list_ord += list_pair\n for i in range(len(list_ord)-1):\n print(list_ord[i],end = ' ')\n print(\"\")\nif __name__=='__main__':\n for _ in range(int(input())):\n n = input()\n list = [int(i) for i in input().split()]\n arrays_44_sortA(list)","sub_path":"Code/CodeRecords/2428/60668/256365.py","file_name":"256365.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"23820879","text":"class ListNode (object): # 定义一个链表的结点\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution (object):\n def addTwoNumbers(self, list_1, list_2):\n root = n = ListNode(0) # 变量root用来指向新链表的头,变量n用来迭代\n carry = 0\n while list_1 or list_2 or carry:\n v1 = v2 = 0\n if list_1:\n v1 = list_1.val\n list_1 = list_1.next\n if list_2:\n v2 = list_2.val\n list_2 = list_2.next\n carry, val = divmod(v1 + v2 + carry, 10)\n n.next = ListNode(val)\n n = n.next\n return root.next\n\n\nif __name__ == \"__main__\":\n a = ListNode(1)\n b = ListNode(2)\n c = ListNode(3)\n\n d 
= ListNode(4)\n e = ListNode(8)\n f = ListNode(6)\n a.next = b\n b.next = c\n d.next = e\n e.next = f\n l1 = a\n l2 = d\n ans = Solution().addTwoNumbers(l1, l2)\n print(ans.val, end=\"\")\n while ans.next is not None:\n ans = ans.next\n print(\"-->\", ans.val, end=\"\"),\n print('\\n')\n","sub_path":"002_AddTwoNumbers.py","file_name":"002_AddTwoNumbers.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"411099446","text":"from io import BytesIO\nfrom django.template.loader import get_template\nimport xhtml2pdf.pisa as pisa\nimport uuid\nfrom django.conf import settings\n\n\ndef save_pdf(params: dict):\n template = get_template(\"pdf.html\")\n print(\"template: \", template)\n html = template.render(params)\n print(\"html: \", html)\n response = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"UTF-8\")), response)\n print(\"html: \", html)\n file_name = uuid.uuid4()\n\n try:\n print(\"Inside TRy FILE: \", file_name)\n with open(str(settings.BASE_DIR) + f\"/media/{file_name}.pdf\", \"wb+\") as output:\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"UTF-8\")), output)\n print(\"Inside TRy pdf: \", pdf)\n\n except Exception as e:\n print(e)\n\n if pdf.err:\n return \"\", False\n\n return file_name, True\n","sub_path":"home/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"387187139","text":"from direct.gui.DirectButton import DirectButton\r\nfrom direct.gui.DirectFrame import DirectFrame\r\nfrom direct.gui.OnscreenText import OnscreenText\r\nfrom direct.showbase.Loader import Loader\r\nimport gui.menu.pause_options\r\nimport gui.menu.pause\r\n\r\ndef load(args):\r\n \"\"\"Sets up the GUI for the main menu.\r\n Arguments:\r\n This takes no arguments.\r\n \"\"\"\r\n\r\n global backFrame\r\n global menuFrame\r\n global pauseText\r\n global backButton\r\n\r\n font_digital = loader.loadFont('digital.egg')\r\n\r\n backFrame = DirectFrame()\r\n backFrame['frameColor'] = (0, 0, 0, .5)\r\n backFrame['frameSize'] = (2, -2, 2, -2)\r\n backFrame.setPos(0, 0, 0)\r\n\r\n menuFrame = DirectFrame()\r\n menuFrame.reparentTo(backFrame)\r\n menuFrame['frameColor'] = (1, 1, 1, .5)\r\n menuFrame['frameSize'] = (.5, -.5, .5, -.5)\r\n menuFrame.setPos(0, 0, 0)\r\n\r\n pauseText = OnscreenText()\r\n pauseText['text'] = ('PAUSED')\r\n pauseText['scale'] = (.1)\r\n pauseText['font'] = (font_digital)\r\n pauseText['fg'] = (1, 1, 1, 1)\r\n pauseText.setPos(0, .9)\r\n\r\n backButton = DirectButton()\r\n backButton.reparentTo(menuFrame)\r\n backButton['text'] = ('Back')\r\n backButton['text_scale'] = (.1)\r\n backButton['text_pos'] = (0, -0.03)\r\n backButton['frameVisibleScale'] = (2, 0.5, 0)\r\n backButton['frameColor'] = (1, 1, 1, 0)\r\n backButton['command'] = (messenger.send)\r\n backButton['extraArgs'] = (\"switch_gui\", [gui.menu.pause_options, gui.menu.pause])\r\n backButton.setPos(0, 0, 0)\r\n\r\n\r\ndef destroy():\r\n print(\"Destroying Menu\")\r\n pauseText.destroy()\r\n backFrame.destroy()\r\n menuFrame.destroy()\r\n backButton.destroy()\r\n\r\n#def update(): #This doesnt need an update right now.\r\n","sub_path":"gui/menu/pause_options.py","file_name":"pause_options.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"481455542","text":"\"\"\"\nModule containes an example GUI. 
The main window configures the FEMB \nwhile trace_fft_window provides a second window with live trace and FFT.\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import absolute_import\nfrom builtins import int\nfrom builtins import str\nfrom builtins import hex\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom time import sleep\n\nfrom .configuration import CONFIG\nfrom .trace_fft_window import TRACE_FFT_WINDOW\n\nimport numpy as np\nfrom matplotlib import pyplot\n\nfrom tkinter import *\n\nGAINVALS = (\"4.7 mV/fC\",\"7.8 mV/fC\",\"14 mV/fC\",\"25 mV/fC\")\nSHAPEVALS = (\"0.5 us\", \"1 us\", \"2 us\", \"3 us\")\nBASEVALS = (\"900 mV--induction\",\"200 mV--collection\")\n\nclass CONFIGURATION_WINDOW(Frame):\n\n\n def __init__(self, master=None):\n Frame.__init__(self,master)\n self.pack()\n\n #Define configuration object\n self.femb_config = CONFIG()\n\n #Define general commands column\n self.define_general_commands_column()\n\n #Define configuration commands column\n self.define_config_commands_column()\n\n #Define fe-asic configuration column\n self.define_feasic_config_commands_column()\n\n #Define adc asic configuration column\n self.define_adcasic_config_commands_column()\n\n self.trace_fft_window = None\n self.trace_fft = None\n\n def define_general_commands_column(self):\n columnbase=0\n\n label = Label(self, text=\"General Commands\")\n label.grid(row=0,column=columnbase, columnspan=2)\n\n label = Label(self,text=\"Register to read:\")\n label.grid(sticky=W,row=1,column=columnbase+0)\n\n # Adding register number to read entry box\n self.readreg_number_entry = Entry(self,width=4)\n self.readreg_number_entry.grid(sticky=W,row=1,column=columnbase+1)\n\n # Adding read register result label\n label = Label(self,text=\"Register contents:\")\n label.grid(sticky=W,row=2,column=columnbase+0)\n self.readreg_result = Label(self, text=\"\",width=10)\n self.readreg_result.grid(sticky=W,row=2,column=columnbase+1)\n\n #Adding the read register button\n readreg_button = Button(self, text=\"Read Register\", command=self.call_readRegister)\n readreg_button.grid(row=3,column=columnbase,columnspan=2) \n\n label = Label(self,text=\"Register to write:\")\n label.grid(sticky=W,row=4,column=columnbase+0)\n\n # Adding register number to write entry box\n self.writereg_number_entry = Entry(self,width=4)\n self.writereg_number_entry.grid(sticky=W,row=4,column=columnbase+1)\n\n label = Label(self,text=\"Value to write:\")\n label.grid(sticky=W,row=5,column=columnbase+0)\n\n # Adding register value to write entry box\n self.writereg_value_entry = Entry(self,width=15)\n self.writereg_value_entry.grid(sticky=W,row=5,column=columnbase+1)\n\n #Adding the write register button\n writereg_button = Button(self, text=\"Write Register\", command=self.call_writeRegister)\n writereg_button.grid(row=6,column=columnbase,columnspan=2)\n\n # Adding write register result label\n self.writereg_result = Label(self, text=\"\")\n self.writereg_result.grid(sticky=W,row=7,column=columnbase,columnspan=2)\n\n #Adding the reset plot button\n reset_plot_button = Button(self, text=\"Show/Reset Plots\", command=self.call_reset_plot) \n reset_plot_button.grid(row=8,column=columnbase,columnspan=2)\n\n\n def define_config_commands_column(self):\n columnbase=10\n\n label = Label(self, text=\"Configuration Commands\")\n label.grid(row=0,column=columnbase,columnspan=2)\n\n #Adding the reset button\n reset_button = Button(self, 
text=\"Reset\", command=self.call_reset)\n reset_button.grid(row=1,column=columnbase,columnspan=2)\n\n #Adding the initialization button\n init_button = Button(self, text=\"Initialize\", command=self.call_initialize)\n init_button.grid(row=2,column=columnbase,columnspan=2)\n\n # Adding asic number to select\n label = Label(self,text=\"ASIC:\")\n label.grid(sticky=W,row=3,column=columnbase+0)\n self.asic_number_entry = Spinbox(self,from_=0,to=self.femb_config.NASICS-1,insertwidth=1,width=4)\n self.asic_number_entry.grid(sticky=W,row=3,column=columnbase+1)\n\n # Adding channel number to select\n label = Label(self,text=\"Channel:\")\n label.grid(sticky=W,row=4,column=columnbase+0)\n self.channel_number_entry = Spinbox(self,from_=0,to=15,insertwidth=3,width=4)\n self.channel_number_entry.grid(sticky=W,row=4,column=columnbase+1)\n\n #Adding the select channel button\n selectChannel_button = Button(self, text=\"Select Channel\", command=self.call_selectChannel)\n selectChannel_button.grid(row=5,column=columnbase,columnspan=2)\n\n self.selectChannel_result = Label(self, text=\"\")\n self.selectChannel_result.grid(sticky=W,row=6,column=columnbase)\n\n def define_feasic_config_commands_column(self):\n columnbase=20\n\n label = Label(self, text=\"FE ASIC Configuration Commands\")\n label.grid(row=0,column=columnbase,columnspan=2)\n\n # FE ASIC config entry\n label = Label(self, text=\"Gain\")\n label.grid(sticky=W,row=1,column=columnbase)\n self.feasic_config_gain_entry = Spinbox(self,values=GAINVALS,state=\"readonly\")\n self.feasic_config_gain_entry.grid(sticky=W,row=1,column=columnbase+1)\n label = Label(self, text=\"Shape\")\n label.grid(sticky=W,row=2,column=columnbase)\n self.feasic_config_shape_entry = Spinbox(self,values=SHAPEVALS,state=\"readonly\")\n self.feasic_config_shape_entry.grid(sticky=W,row=2,column=columnbase+1)\n label = Label(self, text=\"Baseline\")\n label.grid(sticky=W,row=3,column=columnbase)\n self.feasic_config_base_entry = Spinbox(self,values=BASEVALS,state=\"readonly\")\n self.feasic_config_base_entry.grid(sticky=W,row=3,column=columnbase+1)\n \n #Adding the configure all FE-ASIC channels button\n feasic_config_button = Button(self, text=\"Config FE-ASIC\", command=self.call_feasic_config)\n feasic_config_button.grid(row=4,column=columnbase,columnspan=2)\n\n # pulser set button\n\n label = Label(self, text=\"Pulse Height\")\n label.grid(sticky=W,row=5,column=columnbase)\n self.pulser_height_entry = Spinbox(self,from_=0,to=31,insertwidth=2,width=3)\n self.pulser_height_entry.grid(sticky=W,row=5,column=columnbase+1)\n\n self.pulser_enable_var = IntVar()\n self.pulser_enable_entry = Checkbutton(self,text=\"Pulser Enabled\",variable=self.pulser_enable_var)\n self.pulser_enable_entry.grid(sticky=W,row=6,column=columnbase,columnspan=2)\n\n pulser_button = Button(self, text=\"Set Pulser\", command=self.call_set_pulser)\n pulser_button.grid(row=8,column=columnbase,columnspan=2)\n\n self.pulser_result = Label(self, text=\"\")\n self.pulser_result.grid(sticky=W,row=7,column=columnbase,columnspan=2)\n\n def define_adcasic_config_commands_column(self):\n columnbase=30\n\n label = Label(self, text=\"ADC ASIC Configuration Commands\")\n label.grid(row=0,column=columnbase)\n\n #Adding the configure all ADC-ASIC channels button\n label = Label(self, text=\"Offset Current:\")\n label.grid(row=1,column=columnbase)\n self.adc_offset_current_entry = Spinbox(self,from_=0,to=15,insertwidth=2,width=3)\n self.adc_offset_current_entry.grid(sticky=W,row=1,column=columnbase+1)\n\n 
self.adc_offset_enable_var = IntVar()\n        self.adc_offset_enable_entry = Checkbutton(self,text=\"Enable Offset Current\",variable=self.adc_offset_enable_var)\n        self.adc_offset_enable_entry.grid(sticky=W,row=2,column=columnbase)\n\n        adc_config_button = Button(self, text=\"Config ADC-ASIC\", command=self.call_adcasic_config)\n        adc_config_button.grid(row=3,column=columnbase,columnspan=2)\n\n        self.adc_result = Label(self, text=\"\")\n        self.adc_result.grid(sticky=W,row=4,column=columnbase,columnspan=2)\n\n        adc_sync_button = Button(self, text=\"Sync ADCs\", command=self.call_sync_adc)\n        adc_sync_button.grid(row=5,column=columnbase,columnspan=2)\n\n        self.adc_sync_result = Label(self, text=\"\")\n        self.adc_sync_result.grid(sticky=W,row=6,column=columnbase,columnspan=2)\n\n\n    def call_readRegister(self):\n        regnum = self.readreg_number_entry.get()\n        try:\n            regnum = int(regnum)\n            if regnum < 0:\n                raise ValueError(\"regnum must be >= 0\")\n        except ValueError:\n            self.readreg_result[\"text\"] = \"Error: Register must be a number >= 0\"\n            return\n        message = \"\"\n        regVal = self.femb_config.femb.read_reg(regnum)\n        if regVal is not None:\n            message = \"0x{:08x}\".format(regVal)\n        else:\n            message = \"Error: regVal = None\"\n        self.readreg_result[\"text\"] = message\n\n    def call_writeRegister(self):\n        regnum = self.writereg_number_entry.get()\n        try:\n            regnum = int(regnum)\n            if regnum < 0:\n                raise ValueError(\"regnum must be >= 0\")\n        except ValueError:\n            self.writereg_result[\"text\"] = \"Error: Register must be a number >= 0\"\n            return\n        regval = self.writereg_value_entry.get()\n        message = \"value must be int literal e.g. 123, 0xF3, 0b0101\"\n        if regval[:2] == \"0x\":\n            try:\n                regval = int(regval,16)\n            except ValueError:\n                self.writereg_result[\"text\"] = message\n                return\n        elif regval[:2] == \"0b\":\n            try:\n                regval = int(regval,2)\n            except ValueError:\n                self.writereg_result[\"text\"] = message\n                return\n        else:\n            try:\n                regval = int(regval)\n            except ValueError:\n                self.writereg_result[\"text\"] = message\n                return\n        if (regval < 0) or (regval > 0xFFFFFFFF):\n            message = 'Value must be >= 0 and <= 0xFFFFFFFF'\n            self.writereg_result[\"text\"] = message\n            return\n        self.femb_config.femb.write_reg(regnum,regval)\n        self.writereg_result[\"text\"] = \"\"\n\n    def call_reset(self):\n        self.femb_config.resetBoard()\n\n    def call_initialize(self):\n        self.femb_config.initBoard()\n\n    def call_selectChannel(self):\n        asic = None\n        chan = None\n        try:\n            asic = int(self.asic_number_entry.get())\n        except ValueError:\n            self.selectChannel_result[\"text\"] = \"Error asic must be an int\"\n            return\n        try:\n            chan = int(self.channel_number_entry.get())\n        except ValueError:\n            self.selectChannel_result[\"text\"] = \"Error channel must be an int\"\n            return\n        message = \"\"\n        if asic < 0 or asic >= self.femb_config.NASICS:\n            self.selectChannel_result[\"text\"] = \"Error asic only from 0 to {}\".format(self.femb_config.NASICS - 1)\n            return\n        if chan < 0 or chan >= 16:\n            self.selectChannel_result[\"text\"] = \"Error channel only from 0 to 15\"\n            return\n        self.femb_config.selectChannel(asic,chan)\n        self.selectChannel_result[\"text\"] = \"\"\n\n    def call_feasic_config(self):\n        gain = GAINVALS.index(self.feasic_config_gain_entry.get())\n        shape = SHAPEVALS.index(self.feasic_config_shape_entry.get())\n        base = BASEVALS.index(self.feasic_config_base_entry.get())\n        self.femb_config.configFeAsic(gain,shape,base)\n\n    def call_set_pulser(self):\n        enabled = self.pulser_enable_var.get()\n        if not(enabled == 0 or enabled == 1):\n            raise ValueError(\"Pulser enabled must 
be 0 or 1\")\n pulseHeight = None\n try:\n pulseHeight = int(self.pulser_height_entry.get())\n except ValueError:\n self.pulser_result[\"text\"] = \"Error pulseHeight must be an int\"\n return\n if pulseHeight < 0 or pulseHeight >= 32:\n self.pulser_result[\"text\"] = \"Error pulseHeight must be 0 to 31\"\n return\n self.pulser_result[\"text\"] = \"\"\n self.femb_config.setInternalPulser(enabled,pulseHeight)\n\n def call_adcasic_config(self):\n print(\"call_adcasic_config\")\n enabled = self.adc_offset_enable_var.get()\n if not(enabled == 0 or enabled == 1):\n raise ValueError(\"Pulser enabled must be 0 or 1\")\n offsetCurrent = None\n try:\n offsetCurrent = int(self.adc_offset_current_entry.get())\n except ValueError:\n self.adc_result[\"text\"] = \"Error offsetCurrent must be an int\"\n return\n if offsetCurrent < 0 or offsetCurrent >= 16:\n self.adc_result[\"text\"] = \"Error offsetCurrent must be 0 to 15\"\n return\n self.adc_result[\"text\"] = \"\"\n\n f2default = 0\n clkdefault = \"fifo\"\n if hasattr(self.femb_config,\"F2DEFAULT\"):\n f2default = self.femb_config.F2DEFAULT\n if hasattr(self.femb_config,\"CLKDEFAULT\"):\n clkdefault = self.femb_config.CLKDEFAULT\n\n clockMonostable = False\n clockExternal = False\n clockFromFIFO = False\n if clkdefault==\"fifo\":\n clockFromFIFO = True\n elif clkdefault==\"monostable\":\n clockMonostable = True\n elif clkdefault==\"external\":\n clockExternal = True\n else:\n print(\"Error: CLKDEFAULT='{}' not one of the allowed options. Try fife, monostable, or external..\".format(clkdefault))\n\n self.femb_config.configAdcAsic(enableOffsetCurrent=enabled,offsetCurrent=offsetCurrent,f2=f2default,clockMonostable=clockMonostable,clockExternal=clockExternal,clockFromFIFO=clockFromFIFO,pdsr=1,pcsr=1)\n #nRetries = 5\n #for iRetry in range(nRetries):\n # self.femb_config.configAdcAsic(enableOffsetCurrent=enabled,offsetCurrent=offsetCurrent,f2=f2default,clockMonostable=clockMonostable,clockExternal=clockExternal,clockFromFIFO=clockFromFIFO)\n # # Check that board streams data\n # data = self.femb.get_data(1)\n # if data == None:\n # print(\"Board not streaming data, retrying initialization...\")\n # continue # try initializing again\n # self.adc_result[\"text\"] = \"\"\n # return\n #self.adc_result[\"text\"] = \"Board not streaming data after {} tries\".format(nRetries)\n\n def call_sync_adc(self):\n print(\"call_sync_adc\")\n message = \"Sync Error\"\n isAlreadySynced, latchloc1, latchloc2, phase = self.femb_config.syncADC()\n if isAlreadySynced: \n message = \"Already Sync'd\"\n else:\n \t message = \"Latch latency {:#010x} {:#010x} Phase: {:#010x}\".format(latchloc1,latchloc2,phase)\n self.adc_sync_result[\"text\"] = message\n\n def call_quit(self):\n print(\"call_adcasic_config\")\n\n def call_reset_plot(self):\n if self.trace_fft_window:\n self.trace_fft_window.destroy()\n self.trace_fft_window = Toplevel(self)\n self.trace_fft_window.title(\"Trace FFT Window\")\n self.trace_fft = TRACE_FFT_WINDOW(self.trace_fft_window)\n\n###################################################\n\ndef main():\n \n root = Tk()\n root.title(\"Configuration Window\")\n window = CONFIGURATION_WINDOW(root)\n window.call_reset_plot()\n root.mainloop() \n","sub_path":"femb_python/configuration_window.py","file_name":"configuration_window.py","file_ext":"py","file_size_in_byte":15460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190082347","text":"# File name: rot13.py\n# Author: Jamie Bergen\n# Date created: 
08/01/2013\n# Date last modified: 02/09/2014\n# Python version: 2.7\n# Description: Modifies a string by shifting each letter 13 spaces\n\n\n# Make a list of strings containing the alphabet.\n# (This is based on the ASCII table in which\n# a-z map to consecutive integers from 97-122.)\n\nalpha_list = []\nfor i in range(97, 123):\n alpha_list.append(chr(i))\n\n# Helper function that performs rot13 conversion on a single character\n\ndef find_rot13(char):\n rot13 = ord(char) + 13\n rot13_mod = (rot13 - 96) % 26\n rot13_mod += 96\n return chr(rot13_mod)\n\n# Tests for find_rot13\n\n# print find_rot13('a') #n\n# print find_rot13('n') #a\n# print find_rot13('z') #m\n# print find_rot13('o') #b\n# print find_rot13('b') #o\n# print find_rot13('x') #k\n# print find_rot13('k') #x\n\n# Function that converts a string to rot13\n# (Case and characters outside a-z are preserved)\n\ndef rot13(s):\n result = \"\"\n for char in s:\n if char in alpha_list:\n result += find_rot13(char)\n elif char.lower() in alpha_list:\n result += find_rot13(char.lower()).upper()\n else:\n result += char\n return result\n \n# print rot13('Hello! hi. how are you?')\n# print rot13('Uryyb! uv. ubj ner lbh?')","sub_path":"rot13.py","file_name":"rot13.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"81990857","text":"import numpy as np\nimport sys\nimport os\nimport colorama\nfrom colorama import Fore, Back, Style\nimport time\nfrom powerups import Powerups\n\nclass Fireball(Powerups):\n ''' defines the fireball power ups for ball'''\n def __init__(self, pad, length, breadth):\n self.original_len = pad.len\n self.x = np.zeros((breadth, length),dtype=object)\n self.y = np.zeros((breadth, length),dtype=object)\n self.length = length\n self.breadth = breadth\n self.status = np.zeros((breadth, length),dtype=object)\n for i in range(breadth):\n for j in range(length):\n self.status[i][j] = 2\n self.x_speed = np.zeros((breadth, length),dtype=object)\n self.y_speed = np.zeros((breadth, length),dtype=object)\n self.stime = 0\n self.b = 0\n self.alreadyOn = 0\n Powerups.__init__(self, Back.BLACK + Fore.WHITE + \"*\", Back.BLACK + Fore.WHITE + \"*\")\n\n def initials(self ,main):\n for i in range(self.breadth):\n for j in range(self.length):\n if (main['design'].powers[i][j] == 8):\n self.x[i][j] = i\n self.y[i][j] = j\n\n def change_val(self, x, y, a, main, i, j):\n self.x[i][j] = x\n self.y[i][j] = y\n if (self.status[i][j] == 1 and a == 0):\n self.stime = int(round(time.time()))\n if (self.alreadyOn == 0):\n self.alreadyOn = self.alreadyOn + 1\n main['ball'].fireball = 1 \n self.b = 0\n self.status[i][j] = a\n self.x_speed[i][j] = main['ball'].last_xspeed\n self.y_speed[i][j] = main['ball'].last_yspeed\n \n def drawing(self, main):\n for i in range(self.breadth):\n for j in range(self.length):\n if (self.status[i][j] == 1):\n main['grid'].change_xy(self.x[i][j] ,self.y[i][j] ,Back.BLACK + Fore.WHITE + \"*\")\n main['grid'].change_xy(self.x[i][j] ,self.y[i][j] + 1 ,Back.BLACK + Fore.WHITE + \"*\")\n\n def mov(self ,main):\n for i in range(self.breadth):\n for j in range(self.length):\n if (self.status[i][j] == 1):\n main['grid'].change_xy(self.x[i][j] ,self.y[i][j] ,Back.BLACK + Fore.WHITE + \" \")\n main['grid'].change_xy(self.x[i][j] ,self.y[i][j] + 1 ,Back.BLACK + Fore.WHITE + \" \")\n if (self.x[i][j] + self.x_speed[i][j] >= main['ball'].max_u ):\n self.x[i][j] = main['ball'].max_u-1\n else:\n self.x[i][j] = self.x[i][j] + 
self.x_speed[i][j]\n self.y[i][j] = self.y[i][j] + self.y_speed[i][j]\n if (self.y[i][j] + self.y_speed[i][j] <=0 or self.y[i][j] + self.y_speed[i][j] >=85):\n self.y_speed[i][j] = (-1)*self.y_speed[i][j]\n main['fireball'].drawing(main)\n if (self.x[i][j] >= main['ball'].max_u - 2):\n val = main['pad'].y_value\n l = main['pad'].len - 1\n diff = int(self.y[i][j]) - val\n if (val <= int(self.y[i][j]) and diff <= l):\n main['fireball'].change_val(self.x[i][j] ,self.y[i][j] ,0 ,main, i, j)\n elif (val <= int(self.y[i][j] + 1) and int(self.y[i][j] + 1) - val <= l):\n main['fireball'].change_val(self.x[i][j] ,self.y[i][j] ,0, main, i, j)\n else:\n main['grid'].change_xy(i ,j ,Back.BLACK + Fore.WHITE + \" \")\n main['grid'].change_xy(i ,j + 1 ,Back.BLACK + Fore.WHITE + \" \")\n main['fireball'].change_val(self.x[i][j] ,self.y[i][j] ,2, main, i, j)\n self.x_speed[i][j] = self.x_speed[i][j] + 0.1\n ","sub_path":"fireball.py","file_name":"fireball.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"360840382","text":"import pytest\nfrom dagster import AssetKey, DagsterInvariantViolationError, Out\nfrom dagster.check import CheckError\nfrom dagster.core.asset_defs import AssetIn, SourceAsset, asset, build_assets_job, multi_asset\nfrom dagster.core.host_representation.external_data import (\n ExternalAssetDependedBy,\n ExternalAssetDependency,\n ExternalAssetNode,\n ExternalSensorData,\n ExternalTargetData,\n external_asset_graph_from_defs,\n)\nfrom dagster.serdes import deserialize_json_to_dagster_namedtuple\n\n\ndef test_single_asset_job():\n @asset\n def asset1():\n return 1\n\n assets_job = build_assets_job(\"assets_job\", [asset1])\n external_asset_nodes = external_asset_graph_from_defs([assets_job], source_assets_by_key={})\n\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"asset1\"),\n dependencies=[],\n depended_by=[],\n op_name=\"asset1\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n output_description=None,\n )\n ]\n\n\ndef test_two_asset_job():\n @asset\n def asset1():\n return 1\n\n @asset\n def asset2(asset1):\n assert asset1 == 1\n\n assets_job = build_assets_job(\"assets_job\", [asset1, asset2])\n external_asset_nodes = external_asset_graph_from_defs([assets_job], source_assets_by_key={})\n\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"asset1\"),\n dependencies=[],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey(\"asset2\"), input_name=\"asset1\"\n )\n ],\n op_name=\"asset1\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n output_description=None,\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"asset2\"),\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey(\"asset1\"), input_name=\"asset1\")\n ],\n depended_by=[],\n op_name=\"asset2\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n output_description=None,\n ),\n ]\n\n\ndef test_input_name_matches_output_name():\n not_result = SourceAsset(key=AssetKey(\"not_result\"), description=None)\n\n @asset(ins={\"result\": AssetIn(asset_key=AssetKey(\"not_result\"))})\n def something(result): # pylint: disable=unused-argument\n pass\n\n assets_job = build_assets_job(\"assets_job\", [something], source_assets=[not_result])\n external_asset_nodes = external_asset_graph_from_defs([assets_job], source_assets_by_key={})\n\n assert 
external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"not_result\"),\n dependencies=[],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey(\"something\"), input_name=\"result\"\n )\n ],\n job_names=[],\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"something\"),\n dependencies=[\n ExternalAssetDependency(\n upstream_asset_key=AssetKey(\"not_result\"), input_name=\"result\"\n )\n ],\n depended_by=[],\n op_name=\"something\",\n output_name=\"result\",\n job_names=[\"assets_job\"],\n ),\n ]\n\n\ndef test_two_downstream_assets_job():\n @asset\n def asset1():\n return 1\n\n @asset\n def asset2_a(asset1):\n assert asset1 == 1\n\n @asset\n def asset2_b(asset1):\n assert asset1 == 1\n\n assets_job = build_assets_job(\"assets_job\", [asset1, asset2_a, asset2_b])\n external_asset_nodes = external_asset_graph_from_defs([assets_job], source_assets_by_key={})\n\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"asset1\"),\n dependencies=[],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey(\"asset2_a\"), input_name=\"asset1\"\n ),\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey(\"asset2_b\"), input_name=\"asset1\"\n ),\n ],\n op_name=\"asset1\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n output_description=None,\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"asset2_a\"),\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey(\"asset1\"), input_name=\"asset1\")\n ],\n depended_by=[],\n op_name=\"asset2_a\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n output_description=None,\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"asset2_b\"),\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey(\"asset1\"), input_name=\"asset1\")\n ],\n depended_by=[],\n op_name=\"asset2_b\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n output_description=None,\n ),\n ]\n\n\ndef test_cross_job_asset_dependency():\n @asset\n def asset1():\n return 1\n\n @asset\n def asset2(asset1):\n assert asset1 == 1\n\n assets_job1 = build_assets_job(\"assets_job1\", [asset1])\n assets_job2 = build_assets_job(\"assets_job2\", [asset2], source_assets=[asset1])\n external_asset_nodes = external_asset_graph_from_defs(\n [assets_job1, assets_job2], source_assets_by_key={}\n )\n\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"asset1\"),\n dependencies=[],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey(\"asset2\"), input_name=\"asset1\"\n )\n ],\n op_name=\"asset1\",\n op_description=None,\n job_names=[\"assets_job1\"],\n output_name=\"result\",\n output_description=None,\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"asset2\"),\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey(\"asset1\"), input_name=\"asset1\")\n ],\n depended_by=[],\n op_name=\"asset2\",\n op_description=None,\n job_names=[\"assets_job2\"],\n output_name=\"result\",\n output_description=None,\n ),\n ]\n\n\ndef test_same_asset_in_multiple_pipelines():\n @asset\n def asset1():\n return 1\n\n job1 = build_assets_job(\"job1\", [asset1])\n job2 = build_assets_job(\"job2\", [asset1])\n\n external_asset_nodes = external_asset_graph_from_defs([job1, job2], source_assets_by_key={})\n\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"asset1\"),\n dependencies=[],\n depended_by=[],\n op_name=\"asset1\",\n op_description=None,\n 
job_names=[\"job1\", \"job2\"],\n output_name=\"result\",\n output_description=None,\n ),\n ]\n\n\ndef test_basic_multi_asset():\n @multi_asset(\n outs={\n f\"out{i}\": Out(description=f\"foo: {i}\", asset_key=AssetKey(f\"asset{i}\"))\n for i in range(10)\n }\n )\n def assets():\n pass\n\n assets_job = build_assets_job(\"assets_job\", [assets])\n\n external_asset_nodes = external_asset_graph_from_defs([assets_job], source_assets_by_key={})\n\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(f\"asset{i}\"),\n dependencies=[],\n depended_by=[],\n op_name=\"assets\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=f\"out{i}\",\n output_description=f\"foo: {i}\",\n )\n for i in range(10)\n ]\n\n\ndef test_inter_op_dependency():\n @asset\n def in1():\n pass\n\n @asset\n def in2():\n pass\n\n @asset\n def downstream(only_in, mixed, only_out): # pylint: disable=unused-argument\n pass\n\n @multi_asset(\n outs={\"only_in\": Out(), \"mixed\": Out(), \"only_out\": Out()},\n internal_asset_deps={\n \"mixed\": {AssetKey(\"in1\"), AssetKey(\"only_in\")},\n \"only_out\": {AssetKey(\"only_in\"), AssetKey(\"mixed\")},\n },\n )\n def assets(in1, in2): # pylint: disable=unused-argument\n pass\n\n assets_job = build_assets_job(\"assets_job\", [in1, in2, assets, downstream])\n\n external_asset_nodes = external_asset_graph_from_defs([assets_job], source_assets_by_key={})\n # sort so that test is deterministic\n sorted_nodes = sorted(\n [\n node._replace(\n dependencies=sorted(node.dependencies, key=lambda d: d.upstream_asset_key),\n depended_by=sorted(node.depended_by, key=lambda d: d.downstream_asset_key),\n )\n for node in external_asset_nodes\n ],\n key=lambda n: n.asset_key,\n )\n\n assert sorted_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey([\"downstream\"]),\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey([\"mixed\"]), input_name=\"mixed\"),\n ExternalAssetDependency(\n upstream_asset_key=AssetKey([\"only_in\"]), input_name=\"only_in\"\n ),\n ExternalAssetDependency(\n upstream_asset_key=AssetKey([\"only_out\"]), input_name=\"only_out\"\n ),\n ],\n depended_by=[],\n op_name=\"downstream\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n ),\n ExternalAssetNode(\n asset_key=AssetKey([\"in1\"]),\n dependencies=[],\n depended_by=[\n ExternalAssetDependedBy(downstream_asset_key=AssetKey([\"mixed\"]), input_name=\"in1\"),\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"only_in\"]), input_name=\"in1\"\n ),\n ],\n op_name=\"in1\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n ),\n ExternalAssetNode(\n asset_key=AssetKey([\"in2\"]),\n dependencies=[],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"only_in\"]), input_name=\"in2\"\n )\n ],\n op_name=\"in2\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"result\",\n ),\n ExternalAssetNode(\n asset_key=AssetKey([\"mixed\"]),\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey([\"in1\"]), input_name=\"in1\"),\n ExternalAssetDependency(\n upstream_asset_key=AssetKey([\"only_in\"]), output_name=\"only_in\"\n ),\n ],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"downstream\"]), input_name=\"mixed\"\n ),\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"only_out\"]), output_name=\"mixed\"\n ),\n ],\n op_name=\"assets\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"mixed\",\n ),\n 
ExternalAssetNode(\n asset_key=AssetKey([\"only_in\"]),\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey([\"in1\"]), input_name=\"in1\"),\n ExternalAssetDependency(upstream_asset_key=AssetKey([\"in2\"]), input_name=\"in2\"),\n ],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"downstream\"]), input_name=\"only_in\"\n ),\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"mixed\"]), output_name=\"only_in\"\n ),\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"only_out\"]), output_name=\"only_in\"\n ),\n ],\n op_name=\"assets\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"only_in\",\n ),\n ExternalAssetNode(\n asset_key=AssetKey([\"only_out\"]),\n dependencies=[\n ExternalAssetDependency(\n upstream_asset_key=AssetKey([\"mixed\"]), output_name=\"mixed\"\n ),\n ExternalAssetDependency(\n upstream_asset_key=AssetKey([\"only_in\"]), output_name=\"only_in\"\n ),\n ],\n depended_by=[\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey([\"downstream\"]), input_name=\"only_out\"\n ),\n ],\n op_name=\"assets\",\n op_description=None,\n job_names=[\"assets_job\"],\n output_name=\"only_out\",\n ),\n ]\n\n\ndef test_source_asset_with_op():\n\n foo = SourceAsset(key=AssetKey(\"foo\"), description=None)\n\n @asset\n def bar(foo): # pylint: disable=unused-argument\n pass\n\n assets_job = build_assets_job(\"assets_job\", [bar], source_assets=[foo])\n\n external_asset_nodes = external_asset_graph_from_defs([assets_job], source_assets_by_key={})\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"foo\"),\n op_description=None,\n dependencies=[],\n depended_by=[ExternalAssetDependedBy(AssetKey(\"bar\"), input_name=\"foo\")],\n job_names=[],\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"bar\"),\n op_name=\"bar\",\n op_description=None,\n dependencies=[ExternalAssetDependency(AssetKey(\"foo\"), input_name=\"foo\")],\n depended_by=[],\n job_names=[\"assets_job\"],\n output_name=\"result\",\n ),\n ]\n\n\ndef test_unused_source_asset():\n foo = SourceAsset(key=AssetKey(\"foo\"), description=\"abc\")\n bar = SourceAsset(key=AssetKey(\"bar\"), description=\"def\")\n\n external_asset_nodes = external_asset_graph_from_defs(\n [], source_assets_by_key={AssetKey(\"foo\"): foo, AssetKey(\"bar\"): bar}\n )\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"foo\"),\n op_description=\"abc\",\n dependencies=[],\n depended_by=[],\n job_names=[],\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"bar\"),\n op_description=\"def\",\n dependencies=[],\n depended_by=[],\n job_names=[],\n ),\n ]\n\n\ndef test_used_source_asset():\n bar = SourceAsset(key=AssetKey(\"bar\"), description=\"def\")\n\n @asset\n def foo(bar):\n assert bar\n\n job1 = build_assets_job(\"job1\", [foo], source_assets=[bar])\n\n external_asset_nodes = external_asset_graph_from_defs(\n [job1], source_assets_by_key={AssetKey(\"bar\"): bar}\n )\n assert external_asset_nodes == [\n ExternalAssetNode(\n asset_key=AssetKey(\"bar\"),\n op_description=\"def\",\n dependencies=[],\n depended_by=[\n ExternalAssetDependedBy(downstream_asset_key=AssetKey([\"foo\"]), input_name=\"bar\")\n ],\n job_names=[],\n ),\n ExternalAssetNode(\n asset_key=AssetKey(\"foo\"),\n op_name=\"foo\",\n op_description=None,\n dependencies=[\n ExternalAssetDependency(upstream_asset_key=AssetKey([\"bar\"]), input_name=\"bar\")\n ],\n depended_by=[],\n job_names=[\"job1\"],\n output_name=\"result\",\n output_description=None,\n ),\n 
]\n\n\ndef test_source_asset_conflicts_with_asset():\n bar_source_asset = SourceAsset(key=AssetKey(\"bar\"), description=\"def\")\n\n @asset\n def bar():\n pass\n\n job1 = build_assets_job(\"job1\", [bar])\n\n with pytest.raises(DagsterInvariantViolationError):\n external_asset_graph_from_defs(\n [job1], source_assets_by_key={AssetKey(\"bar\"): bar_source_asset}\n )\n\n\ndef test_input_name_or_output_name_dep_by():\n with pytest.raises(CheckError, match=\"input `foo` and output `bar`\"):\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey(\"bar\"), input_name=\"foo\", output_name=\"bar\"\n )\n with pytest.raises(CheckError, match=\"input `None` and output `None`\"):\n ExternalAssetDependedBy(\n downstream_asset_key=AssetKey(\"bar\"), input_name=None, output_name=None\n )\n\n\ndef test_input_name_or_output_name_dependency():\n with pytest.raises(CheckError, match=\"input `foo` and output `bar`\"):\n ExternalAssetDependency(\n upstream_asset_key=AssetKey(\"bar\"), input_name=\"foo\", output_name=\"bar\"\n )\n with pytest.raises(CheckError, match=\"input `None` and output `None`\"):\n ExternalAssetDependency(\n upstream_asset_key=AssetKey(\"bar\"), input_name=None, output_name=None\n )\n\n\ndef test_back_compat_external_sensor():\n SERIALIZED_0_12_10_SENSOR = '{\"__class__\": \"ExternalSensorData\", \"description\": null, \"min_interval\": null, \"mode\": \"default\", \"name\": \"my_sensor\", \"pipeline_name\": \"my_pipeline\", \"solid_selection\": null}'\n external_sensor_data = deserialize_json_to_dagster_namedtuple(SERIALIZED_0_12_10_SENSOR)\n assert isinstance(external_sensor_data, ExternalSensorData)\n assert len(external_sensor_data.target_dict) == 1\n assert \"my_pipeline\" in external_sensor_data.target_dict\n target = external_sensor_data.target_dict[\"my_pipeline\"]\n assert isinstance(target, ExternalTargetData)\n assert target.pipeline_name == \"my_pipeline\"\n","sub_path":"python_modules/dagster/dagster_tests/core_tests/host_representation_tests/test_external_data.py","file_name":"test_external_data.py","file_ext":"py","file_size_in_byte":18269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"351415397","text":"# -*- coding: utf-8 -*-\n\nfrom icebergsdk.resources.base import UpdateableIcebergObject, IcebergObject\n\n\nclass Application(UpdateableIcebergObject):\n endpoint = 'application'\n\n # Messages\n def inbox(self):\n return self.get_list(\"%sinbox/\" % self.resource_uri)\n\n def outbox(self):\n return self.get_list(\"%soutbox/\" % self.resource_uri)\n\n\n def fetch_secret_key(self):\n \treturn self.request(\"%sfetchSecretKey/\" % self.resource_uri)[\"secret_key\"]\n\n def auth_me(self):\n \"\"\"\n Return the access_token for the current user on this application\n \"\"\"\n return self.request(\"%sauth_me/\" % self.resource_uri)[\"access_token\"]\n\n\n\n\nclass ApplicationCommissionSettings(UpdateableIcebergObject):\n endpoint = 'application_commission_settings'\n\n\n\nclass ApplicationMerchantPolicies(UpdateableIcebergObject):\n endpoint = 'application_merchant_policies'\n\n\n\n def set_mandatory_fields(self, payment_card=None, products=None, \n return_info=None, shipping_info=None, \n store_contact=None, store_information=None,\n store_legal=None):\n if not hasattr(self, \"mandatory_info\"):\n self.mandatory_info = {}\n\n if payment_card is not None:\n self.mandatory_info[\"payment_card\"] = payment_card\n if products is not None:\n self.mandatory_info[\"products\"] = products\n if return_info is not None:\n 
self.mandatory_info[\"return_info\"] = return_info\n if shipping_info is not None:\n self.mandatory_info[\"shipping_info\"] = shipping_info\n if store_contact is not None:\n self.mandatory_info[\"store_contact\"] = store_contact\n if store_information is not None:\n self.mandatory_info[\"store_information\"] = store_information\n if store_legal is not None:\n self.mandatory_info[\"store_legal\"] = store_legal\n\n self.save()\n\n\nclass ApplicationTransaction(IcebergObject):\n endpoint = 'app_transaction'\n\n \n","sub_path":"icebergsdk/resources/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190557219","text":"from django.db import models\n\n\n# Таблица сотрудников (Users)\nclass Users(models.Model):\n fio = models.CharField(max_length = 200)\n\n# Таблица сотрудников Inventar\nclass Inventar(models.Model):\n title = models.CharField(max_length = 200)\n\n# Таблица инвентаря, закреплённого за сотрудниками (User_inventar)\nclass User_inventar(models.Model):\n user_id = models.ForeignKey(Users, on_delete=models.CASCADE)\n inventar_id = models.ForeignKey(Inventar, on_delete=models.CASCADE)\n\n# Таблица истории инвентаря\nclass Inventar_history(models.Model):\n user_id = models.ForeignKey(Users, on_delete=models.CASCADE)\n inventar_id = models.ForeignKey(Inventar, on_delete=models.CASCADE)\n start_date = models.DateTimeField('Дата поступления')\n end_date = models.DateTimeField('Дата списания')\n\n \n","sub_path":"crm/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"613814497","text":"\nimport smart_imports\n\nsmart_imports.all()\n\n\nclass IndexFilter(utils_list_filter.ListFilter):\n ELEMENTS = [utils_list_filter.reset_element(),\n utils_list_filter.choice_element('сортировать по:', attribute='order_by', choices=relations.ORDER_BY.select('value', 'text'), default_value=relations.ORDER_BY.NAME.value)]\n\n\n@dext_old_views.validator(code='clans.not_owner', message='Вы не являетесь владельцем гильдии')\ndef validate_ownership(resource, *args, **kwargs): return resource.clan_info.is_owner_of(resource.clan)\n\n\nclass ClansResource(utils_resources.Resource):\n\n @dext_old_views.validate_argument('clan', prototypes.ClanPrototype.get_by_id, 'clans', 'неверный идентификатор гильдии')\n def initialize(self, clan=None, *args, **kwargs):\n super(ClansResource, self).initialize(*args, **kwargs)\n self.clan = clan\n self.clan_info = logic.ClanInfo(account=self.account)\n\n self.can_moderate_clans = self.request.user.has_perm('clans.moderate_clan')\n\n @dext_old_views.validate_argument('account', accounts_prototypes.AccountPrototype.get_by_id, 'clans.account_clan', 'неверный идентификатор аккаунта')\n @dext_old_views.handler('account-clan')\n def account_clan(self, account):\n clan_info = logic.ClanInfo(account=account)\n if clan_info.clan_id is not None:\n return self.redirect(dext_urls.url('accounts:clans:show', clan_info.clan_id))\n return self.auto_error('clans.account_clan.no_clan', 'Пользователь не состоит в гильдии')\n\n @dext_old_views.validate_argument('page', int, 'clans', 'неверная страница')\n @dext_old_views.validate_argument('order_by', lambda o: relations.ORDER_BY(int(o)), 'clans', 'неверный параметр сортировки')\n @dext_old_views.handler('')\n def index(self, page=1, order_by=relations.ORDER_BY.NAME):\n\n clans_query = 
prototypes.ClanPrototype._model_class.objects.all()\n\n clans_number = clans_query.count()\n\n page = int(page) - 1\n\n url_builder = dext_urls.UrlBuilder(dext_urls.url('accounts:clans:'), arguments={'order_by': order_by.value})\n\n index_filter = IndexFilter(url_builder=url_builder, values={'order_by': order_by.value})\n\n paginator = utils_pagination.Paginator(page, clans_number, conf.settings.CLANS_ON_PAGE, url_builder)\n\n if paginator.wrong_page_number:\n return self.redirect(paginator.last_page_url, permanent=False)\n\n clans_query = clans_query.order_by(order_by.order_field)\n\n clans_from, clans_to = paginator.page_borders(page)\n\n clans = [prototypes.ClanPrototype(clan_model) for clan_model in clans_query[clans_from:clans_to]]\n\n memberships = [prototypes.MembershipPrototype(membership_model) for membership_model in prototypes.MembershipPrototype._db_filter(clan__in=[clan.id for clan in clans],\n role=relations.MEMBER_ROLE.LEADER)]\n accounts = {account_model.id: accounts_prototypes.AccountPrototype(model=account_model)\n for account_model in accounts_prototypes.AccountPrototype._db_filter(id__in=[membership.account_id for membership in memberships])}\n leaders = {membership.clan_id: accounts[membership.account_id] for membership in memberships}\n\n return self.template('clans/index.html',\n {'clans': clans,\n 'page_id': relations.PAGE_ID.INDEX,\n 'paginator': paginator,\n 'index_filter': index_filter,\n 'leaders': leaders})\n\n @dext_old_views.handler('#clan', name='show')\n def show(self):\n\n roles = {member.account_id: member.role for member in prototypes.MembershipPrototype.get_list_by_clan_id(self.clan.id)}\n accounts = sorted(accounts_prototypes.AccountPrototype.get_list_by_id(list(roles.keys())), key=lambda a: (roles[a.id].value, a.nick_verbose))\n heroes = {hero.account_id: hero for hero in heroes_logic.load_heroes_by_account_ids(list(roles.keys()))}\n\n active_accounts_number = sum((1 for account in accounts if account.is_active), 0)\n affect_game_accounts_number = sum((1 for account in accounts if account.can_affect_game), 0)\n\n return self.template('clans/show.html',\n {'page_id': relations.PAGE_ID.SHOW,\n 'clan_meta_object': meta_relations.Clan.create_from_object(self.clan),\n 'roles': roles,\n 'accounts': accounts,\n 'leader': accounts[0],\n 'active_state_days': accounts_conf.settings.ACTIVE_STATE_TIMEOUT // (24 * 60 * 60),\n 'affect_game_accounts_number': affect_game_accounts_number,\n 'active_accounts_number': active_accounts_number,\n 'heroes': heroes})\n\n @utils_decorators.login_required\n @validate_ownership()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('#clan', 'edit')\n def edit(self):\n form = forms.ClanForm(initial={'name': self.clan.name,\n 'abbr': self.clan.abbr,\n 'motto': self.clan.motto,\n 'description': self.clan.description})\n return self.template('clans/edit.html',\n {'form': form,\n 'page_id': relations.PAGE_ID.EDIT})\n\n @utils_decorators.login_required\n @validate_ownership()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('#clan', 'update', method='post')\n def update(self):\n form = forms.ClanForm(self.request.POST)\n\n if not form.is_valid():\n return self.json_error('clans.update.form_errors', form.errors)\n\n if prototypes.ClanPrototype._db_filter(name=form.c.name).exclude(id=self.clan.id).exists():\n return self.json_error('clans.update.name_exists', 'Гильдия с таким названием уже существует')\n\n if prototypes.ClanPrototype._db_filter(abbr=form.c.abbr).exclude(id=self.clan.id).exists():\n return 
self.json_error('clans.update.abbr_exists', 'Гильдия с такой аббревиатурой уже существует')\n\n self.clan.update(abbr=form.c.abbr,\n name=form.c.name,\n motto=form.c.motto,\n description=form.c.description)\n\n return self.json_ok()\n\n @utils_decorators.login_required\n @dext_old_views.handler('#clan', 'remove', method='post')\n def remove(self):\n\n if not self.can_moderate_clans:\n\n if not self.clan_info.is_owner_of(self.clan):\n return self.json_error('clans.not_owner',\n 'Вы не являетесь владельцем гильдии')\n\n if self.clan.members_number > 1:\n return self.json_error('clans.remove.not_empty_clan',\n 'Можно удалить только «пустую» гильдию (сначала удалите всех членов кроме себя)')\n\n self.clan.remove()\n\n return self.json_ok()\n\n\n@dext_old_views.validator(code='clans.membership.account_has_invite', message='Игрок уже отправил заявку на вступление или получил приглашение в вашу гильдию')\ndef validate_account_has_invite(resource, account, **kwargs): return prototypes.MembershipRequestPrototype.get_for(account_id=account.id, clan_id=resource.clan_info.clan_id) is None\n\n\n@dext_old_views.validator(code='clans.membership.clan_has_request', message='Вы уже отправили заявку на вступление или получили приглашение в эту гильдию')\ndef validate_clan_has_request(resource, clan, **kwargs): return prototypes.MembershipRequestPrototype.get_for(account_id=resource.account.id, clan_id=clan.id) is None\n\n\n@dext_old_views.validator(code='clans.membership.no_invite_rights', message='Вы не можете приглашать игроков в гильдию')\ndef validate_invite_rights(resource, *args, **kwargs): return resource.clan_info.can_invite\n\n\n@dext_old_views.validator(code='clans.membership.no_remove_rights', message='Вы не можете исключать игроков в гильдию')\ndef validate_remove_rights(resource, *args, **kwargs): return resource.clan_info.can_remove\n\n\n@dext_old_views.validator(code='clans.membership.already_in_clan', message='Вы уже состоите в гильдии')\ndef validate_not_in_clan(resource, *args, **kwargs): return resource.clan_info.membership is None\n\n\n@dext_old_views.validator(code='clans.membership.not_in_clan', message='Вы не состоите в гильдии')\ndef validate_in_clan(resource, *args, **kwargs): return resource.clan_info.membership is not None\n\n\n@dext_old_views.validator(code='clans.membership.other_already_in_clan', message='Игрок уже состоит в гильдии')\ndef validate_other_not_in_clan(resource, account, **kwargs): return logic.ClanInfo(account).membership is None\n\n\n@dext_old_views.validator(code='clans.membership.request_not_from_clan', message='Запрос не от гильдии')\ndef validate_request_from_clan(resource, request, **kwargs): return request.type.is_FROM_CLAN\n\n\n@dext_old_views.validator(code='clans.membership.request_not_from_account', message='Запрос не от аккаунта')\ndef validate_request_from_account(resource, request, **kwargs): return request.type.is_FROM_ACCOUNT\n\n\nclass MembershipResource(utils_resources.Resource):\n\n @utils_decorators.login_required\n @accounts_views.validate_fast_account()\n def initialize(self, *args, **kwargs):\n super(MembershipResource, self).initialize(*args, **kwargs)\n self.clan_info = logic.ClanInfo(self.account)\n self.clan = None # Only for macros.html, TODO: remove\n\n @validate_invite_rights()\n @dext_old_views.handler('for-clan')\n def for_clan(self):\n self.clan = self.clan_info.clan\n requests = prototypes.MembershipRequestPrototype.get_for_clan(self.clan_info.clan_id)\n accounts = {model.id: accounts_prototypes.AccountPrototype(model) for model 
in accounts_prototypes.AccountPrototype._db_filter(id__in=[request.account_id for request in requests])}\n return self.template('clans/membership/for_clan.html',\n {'requests': requests,\n 'page_id': relations.PAGE_ID.FOR_CLAN,\n 'accounts': accounts})\n\n @dext_old_views.handler('for-account')\n def for_account(self):\n requests = prototypes.MembershipRequestPrototype.get_for_account(self.account.id)\n accounts = {model.id: accounts_prototypes.AccountPrototype(model) for model in accounts_prototypes.AccountPrototype._db_filter(id__in=[request.account_id for request in requests] + [request.initiator_id for request in requests])}\n clans = {model.id: prototypes.ClanPrototype(model) for model in prototypes.ClanPrototype._db_filter(id__in=[request.clan_id for request in requests])}\n return self.template('clans/membership/for_account.html',\n {'requests': requests,\n 'accounts': accounts,\n 'clans': clans,\n 'page_id': relations.PAGE_ID.FOR_ACCOUNT, })\n\n @dext_old_views.validate_argument('account', accounts_prototypes.AccountPrototype.get_by_id, 'clans.membership.invite', 'неверный идентификатор аккаунта')\n @validate_invite_rights()\n @validate_other_not_in_clan()\n @validate_account_has_invite()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('invite', method='get')\n def invite_dialog(self, account):\n return self.template('clans/membership/invite_dialog.html',\n {'invited_account': account,\n 'form': forms.MembershipRequestForm()})\n\n @dext_old_views.validate_argument('clan', prototypes.ClanPrototype.get_by_id, 'clans.membership.request', 'неверный идентификатор гильдии')\n @validate_not_in_clan()\n @validate_clan_has_request()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('request', method='get')\n def request_dialog(self, clan):\n return self.template('clans/membership/request_dialog.html',\n {'invited_clan': clan,\n 'form': forms.MembershipRequestForm()})\n\n @dext_old_views.validate_argument('account', accounts_prototypes.AccountPrototype.get_by_id, 'clans.membership.invite', 'неверный идентификатор аккаунта')\n @validate_invite_rights()\n @validate_other_not_in_clan()\n @validate_account_has_invite()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('invite', method='post')\n def invite(self, account):\n form = forms.MembershipRequestForm(self.request.POST)\n if not form.is_valid():\n return self.json_error('clans.membership.invite.form_errors', form.errors)\n\n request = prototypes.MembershipRequestPrototype.create(initiator=self.account,\n account=account,\n clan=self.clan_info.clan,\n text=form.c.text,\n type=relations.MEMBERSHIP_REQUEST_TYPE.FROM_CLAN)\n\n request.create_invite_message(initiator=self.account)\n\n return self.json_ok()\n\n @dext_old_views.validate_argument('clan', prototypes.ClanPrototype.get_by_id, 'clans.membership.request', 'неверный идентификатор гильдии')\n @validate_not_in_clan()\n @validate_clan_has_request()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('request', method='post')\n def request_post(self, clan):\n form = forms.MembershipRequestForm(self.request.POST)\n if not form.is_valid():\n return self.json_error('clans.membership.request.form_errors', form.errors)\n\n request = prototypes.MembershipRequestPrototype.create(initiator=self.account,\n account=self.account,\n clan=clan,\n text=form.c.text,\n type=relations.MEMBERSHIP_REQUEST_TYPE.FROM_ACCOUNT)\n\n request.create_request_message(initiator=self.account)\n\n return self.json_ok()\n\n @django_transaction.atomic\n 
@dext_old_views.validate_argument('request', prototypes.MembershipRequestPrototype.get_by_id, 'clan.membership.accept_request', 'Неверный идентификатор приглашения')\n @validate_invite_rights()\n @accounts_views.validate_ban_any()\n @validate_request_from_account()\n @dext_old_views.handler('accept-request', method='post')\n def accept_request(self, request):\n accepted_account = accounts_prototypes.AccountPrototype.get_by_id(request.account_id)\n self.clan_info.clan.add_member(accepted_account)\n request.create_accept_request_message(initiator=self.account)\n request.remove()\n return self.json_ok()\n\n @django_transaction.atomic\n @dext_old_views.validate_argument('request', prototypes.MembershipRequestPrototype.get_by_id, 'clan.membership.accept_invite', 'Неверный идентификатор приглашения')\n @validate_not_in_clan()\n @validate_request_from_clan()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('accept-invite', method='post')\n def accept_invite(self, request):\n prototypes.ClanPrototype.get_by_id(request.clan_id).add_member(self.account)\n request.remove()\n return self.json_ok()\n\n @django_transaction.atomic\n @dext_old_views.validate_argument('request', prototypes.MembershipRequestPrototype.get_by_id, 'clan.membership.reject_request', 'Неверный идентификатор приглашения')\n @validate_invite_rights()\n @validate_request_from_account()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('reject-request', method='post')\n def reject_request(self, request):\n request.create_reject_request_message(initiator=self.account)\n request.remove()\n return self.json_ok()\n\n @django_transaction.atomic\n @dext_old_views.validate_argument('request', prototypes.MembershipRequestPrototype.get_by_id, 'clan.membership.reject_invite', 'Неверный идентификатор приглашения')\n @validate_not_in_clan()\n @validate_request_from_clan()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('reject-invite', method='post')\n def reject_invite(self, request):\n request.remove()\n return self.json_ok()\n\n @django_transaction.atomic\n @dext_old_views.validate_argument('account', accounts_prototypes.AccountPrototype.get_by_id, 'clan.membership.remove_from_clan', 'Неверный идентификатор пользователя')\n @validate_remove_rights()\n @accounts_views.validate_ban_any()\n @dext_old_views.handler('remove-from-clan', method='post')\n def remove_from_clan(self, account):\n other_clan_info = logic.ClanInfo(account)\n if other_clan_info.clan_id != self.clan_info.clan_id:\n return self.auto_error('clans.membership.remove_from_clan.not_in_clan', 'Игрок не состоит в вашей гильдии')\n\n if self.clan_info.membership.role.priority >= other_clan_info.membership.role.priority:\n return self.auto_error('clans.membership.remove_from_clan.wrong_role_priority', 'Вы не можете исключить игрока в этом звании')\n\n self.clan_info.clan.remove_member(account)\n\n self.clan_info.clan.create_remove_member_message(self.account, account)\n\n return self.json_ok()\n\n @django_transaction.atomic\n @validate_in_clan()\n @dext_old_views.handler('leave-clan', method='post')\n def leave_clan(self):\n if self.clan_info.membership.role.is_LEADER:\n return self.auto_error('clans.membership.leave_clan.leader', 'Лидер гильдии не может покинуть её. 
Передайте лидерство или расформируйте гильдию.')\n\n self.clan_info.clan.remove_member(self.account)\n\n return self.json_ok()\n","sub_path":"src/the_tale/the_tale/accounts/clans/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361925571","text":"import json\nimport urllib.request\ntuling_key='bc84fe730bc756e8a421051121343138'\napi_url = \"https://api.ownthink.com/bot\"\n\nclass tyuling_replay:\n def __init__(self):\n print('开始构建DBUtil')\n def get_message(message,userid):\n req = {\n \"spoken\":message,\n \"appid\": tuling_key,\n \"userid\": userid\n # \"perception\":\n # {\n # \"inputText\":\n # {\n # \"text\": message\n # },\n #\n # \"selfInfo\":\n # {\n # \"location\":\n # {\n # \"city\": \"\",\n # \"province\": \"\",\n # \"street\": \"\"\n # }\n # }\n # },\n # \"userInfo\":\n # {\n # \"appid\": tuling_key,\n # \"userid\": userid\n # }\n }\n req = json.dumps(req).encode('utf8')\n http_post = urllib.request.Request(api_url, data=req, headers={'content-type': 'application/json'})\n response = urllib.request.urlopen(http_post)\n response_str = response.read().decode('utf8')\n response_dic = json.loads(response_str)\n results_code = response_dic['data']['type']\n print(results_code)\n if results_code == 5000:\n results_text =response_dic['data']['info']['text']\n else:\n results_text = \"换个问问呢\"\n return results_text","sub_path":"app_api/python_AI/tyuling_replay.py","file_name":"tyuling_replay.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"380456493","text":"from helper import *\nfrom encrypt import encrypt\nimport json\n\ndef get_keys():\n\t\"\"\"Extracts key from json file.\"\"\"\n\twith open('keys.json') as f:\n\t\tkeys = json.load(f)\n\treturn keys\n\ndef decrypt(cipher, keys):\n\tpublic_key = keys['public_key']\n\tprivate_key = keys['private_key']\n\tn = public_key[\"n\"]\n\te = public_key[\"e\"]\n\td = private_key[\"d\"]\n\ten_msg = cipher\n\n\tdr_msg = []\n\tfor i in range(0, len(en_msg)):\n\t\tdr_msg.append(chr(power(en_msg[i], d, n)))\n\n\treturn dr_msg \n\n\nif __name__ == '__main__':\n\tkeys = get_keys()\n\tprint(keys)\n\tprint(\"Enter Message to be encoded : \")\n\tmsg = input()\n\n\tcipher = encrypt(msg, keys[\"public_key\"])\n\tprint(cipher)\n\n\tdecoded_text = decrypt(cipher, keys)\n\tprint(decoded_text)","sub_path":"challenges/RSA-1/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"643461812","text":"import pandas as pd\nimport numpy as np\nimport featureselection as fselect\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.ensemble import RandomForestClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.naive_bayes import GaussianNB, BernoulliNB\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.linear_model import SGDClassifier\n\n\ndef KNN_subset(X_train,X_test,y_train,y_test, features):\n index =0\n neighbors = [5,10,12,14,16,20]\n df = pd.DataFrame(columns=['Neighbors','Confusion Matrix'])\n rows = []\n\n for n in neighbors:\n feature = features[index]\n knn = KNeighborsClassifier(n_neighbors=n,n_jobs=-1)\n X_train_knn, X_test_knn = 
X_train[:,feature], X_test[:,feature]\n knn.fit(X_train_knn,y_train)\n predicted_labels = knn.predict(X_test_knn)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix = [tn,fp,fn,tp]\n rows.append([n, convert_matrix])\n index +=1\n\n for i in range(len(rows)):\n df = df.append({'Neighbors':rows[i][0],'Confusion Matrix':rows[i][1]}, ignore_index=True)\n\n return df\n\ndef elastic_subset(X_train,X_test,y_train,y_test, features):\n \n index =0\n \n \n df = pd.DataFrame(columns=['Alpha','Confusion Matrix'])\n rows = []\n alphas= [0.0001, 0.001, 0.01]#,0.1,1]\n \n for al in alphas:\n \n feature = features[index]\n regr = SGDClassifier(loss = 'log',alpha= al,penalty = 'l1',random_state=0)\n X_train_knn, X_test_knn = X_train[:,feature], X_test[:,feature]\n regr.fit(X_train_knn,y_train)\n predicted_labels = regr.predict(X_test_knn)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix = [tn,fp,fn,tp]\n rows.append([al, convert_matrix])\n index +=1\n for i in range(len(rows)):\n df = df.append({'Alpha':rows[i][0],'Confusion Matrix':rows[i][1]}, ignore_index=True)\n\n return df\n\n\n\ndef SVM_subset(X_train, X_test, y_train, y_test,features):\n index =0\n df = pd.DataFrame(columns=['Kernel','C','Gamma','Degree','Confusion Matrix'])\n rows = []\n\n Cs = [1e-1, 1, 1e1, 1e2, 1e3]\n gammas = [1,1e1]\n degrees = [2,3]\n\n for c in Cs:\n feature = features[index]\n linear = LinearSVC(C=c, random_state=0, max_iter=10000)\n\n X_train_linear, X_test_linear = X_train[:,feature], X_test[:,feature]\n\n linear.fit(X_train_linear, y_train)\n predicted_labels = linear.predict(X_test_linear)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix = [tn,fp,fn,tp]\n rows.append(['linear', c, '', '',convert_matrix])\n index+=1\n\n for gamma in gammas:\n feature = features[index]\n rbf = SVC(kernel = 'rbf', C=c, gamma=gamma, random_state=0, max_iter=10000)\n\n X_train_rbf, X_test_rbf = X_train[:,feature], X_test[:,feature]\n\n rbf.fit(X_train_rbf, y_train)\n predicted_labels = rbf.predict(X_test_rbf)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix = [tn,fp,fn,tp]\n rows.append(['rbf', c, gamma, '', convert_matrix])\n index+=1\n\n for degree in degrees:\n feature = features[index]\n poly = SVC(kernel='poly', C=c, gamma=gamma, degree=degree, random_state=0, max_iter=10000)\n X_train_poly, X_test_poly = X_train[:,feature], X_test[:,feature]\n\n poly.fit(X_train_poly,y_train)\n predicted_labels = poly.predict(X_test_poly)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix = [tn,fp,fn,tp]\n rows.append(['poly', c, gamma, degree, convert_matrix])\n index+=1\n\n for i in range(len(rows)):\n df = df.append({'Kernel':rows[i][0],'C':rows[i][1],'Gamma':rows[i][2], 'Degree':rows[i][3],\n 'Confusion Matrix':rows[i][4]}, ignore_index=True)\n\n return df\n\n\ndef rdforest_subset(X_train,X_test,y_train,y_test,features):\n index =0\n df = pd.DataFrame(columns=['N_Estimators','Max_Depth','Confusion Matrix'])\n rows = []\n\n estimators = [ 300, 500]\n max_depths = [5,10,15]\n\n for estimator in estimators:\n for max_d in max_depths:\n feature = features[index]\n rdf = RandomForestClassifier(n_estimators=estimator, max_depth=max_d, random_state=0, n_jobs=-1)\n\n X_train_rdf, X_test_rdf = X_train[:,feature], X_test[:,feature]\n rdf.fit(X_train_rdf, y_train)\n predicted_labels = 
rdf.predict(X_test_rdf)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix = [tn,fp,fn,tp]\n rows.append([estimator, max_d, convert_matrix])\n index+=1\n\n for i in range(len(rows)):\n df = df.append({'N_Estimators':rows[i][0],'Max_Depth':rows[i][1],\n 'Confusion Matrix':rows[i][2]}, ignore_index=True)\n\n return df\n\n\n\ndef xgboost_subset(X_train,X_test,y_train,y_test,features):\n index =0\n df = pd.DataFrame(columns=['Max_depth','N_estimators','Confusion Matrix'])\n # ,'N_estimators','ColSample','Subsample',\n # 'Min_Child_Weight','Scale_Pos_Weight',\n rows = []\n\n rate = 0.05\n #xg_alpha = 0\n #xg_lambda = 1\n max_depth = [1,2,3,4]#np.linspace(2, 10, 5, dtype=int)\n n_estimators= [50,100,150]#np.linspace(50, 450, 4, dtype=int)\n # colsample_bytree= np.linspace(0.5, 1, 4)\n # subsample = np.linspace(0.5,1,4)\n # min_child_weight = np.linspace(1, 19, 4, dtype=int)\n # scale_pos_weight=np.linspace(3, 10,4, dtype=int)\n\n for depth in max_depth:\n for estimators in n_estimators:\n feature = features[index]\n # for col in colsample_bytree:\n # for sub in subsample:\n # for child_weight in min_child_weight:\n # for pos_weight in scale_pos_weight:\n # min_child_weight=child_weight,colsample_bytree=col,\n # scale_pos_weight=pos_weight,n_estimators=estimators, subsample=sub\n xgb = XGBClassifier(booster='gbtree',max_depth=depth,learning_rate=rate,n_estimators = estimators)\n X_train_xgb, X_test_xgb = X_train[:,feature], X_test[:,feature]\n xgb.fit(X_train_xgb, y_train)\n predicted_labels = xgb.predict(X_test_xgb)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix = [tn,fp,fn,tp]\n # ,estimators,col,sub,child_weight,pos_weight\n rows.append([depth,estimators,convert_matrix])\n index+=1\n\n for i in range(len(rows)):\n df = df.append({'Max_depth':rows[i][0],'N_estimators':rows[i][1],\n 'Confusion Matrix':rows[i][2]}, ignore_index=True)\n\n return df\n\n\ndef naive_bayes_subset(X_train,X_test,y_train,y_test, features):\n index =0\n df = pd.DataFrame(columns=['Gauss', 'Confusion Matrix'])\n rows = []\n\n gnb = GaussianNB()\n feature = features[index]\n index+=1\n X_train_gnb, X_test_gnb = X_train[:,feature], X_test[:,feature]\n gnb.fit(X_train_gnb, y_train)\n predicted_labels = gnb.predict(X_test_gnb)\n tn_g, fp_g, fn_g, tp_g = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix_g = [tn_g,fp_g,fn_g,tp_g]\n\n df = df.append({'Gauss':1, 'Confusion Matrix':convert_matrix_g}, ignore_index=True)\n\n bnb = BernoulliNB()\n feature = features[index]\n\n\n X_train_bnb, X_test_bnb = X_train[:,feature], X_test[:,feature]\n bnb.fit(X_train_bnb, y_train)\n predicted_labels = bnb.predict(X_test_bnb)\n tn, fp, fn, tp = confusion_matrix(y_test, predicted_labels, labels=[0,1]).ravel()\n convert_matrix_b = [tn,fp,fn,tp]\n\n df = df.append({'Gauss':0,'Confusion Matrix':convert_matrix_b}, ignore_index=True)\n\n\n return df\n\ndef classify_sfs(estimator, X_train, X_test, y_train, y_test, features):\n if estimator == 'svm':\n return SVM_subset(X_train, X_test, y_train, y_test, features)\n elif estimator == 'rdforest':\n return rdforest_subset(X_train, X_test, y_train, y_test, features)\n elif estimator == 'knn':\n return KNN_subset(X_train, X_test, y_train, y_test, features)\n elif estimator == 'naive_bayes':\n return naive_bayes_subset(X_train, X_test, y_train, y_test, features)\n elif estimator =='xgboost':\n return xgboost_subset(X_train, X_test, y_train, y_test, features)\n elif 
estimator == 'elasticnet':\n        return elastic_subset(X_train, X_test, y_train, y_test, features)","sub_path":"sffs.py","file_name":"sffs.py","file_ext":"py","file_size_in_byte":8862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546817652","text":"# -*- coding: utf-8 -*-\n# Default values for configuration variables\n\nPAYMENT_PRODUCTION = False\n\nWEPAY_ACCOUNT_ID = \"\"\nWEPAY_ACCESS_TOKEN = \"\"\n\nPAYPAL_PRIMARY_EMAIL = \"\"\nPAYPAL_BUSINESS = \"\"\n\nSTRIPE_KEYS = {\n    \"SECRET\": \"\",\n    \"PUBLISHABLE\": \"\",\n}\nSTRIPE_TEST_KEYS = {\n    \"SECRET\": \"\",\n    \"PUBLISHABLE\": \"\",\n}\n\n\nMUSICBRAINZ_BASE_URL = \"https://musicbrainz.org/\"\nMUSICBRAINZ_CLIENT_ID = \"\"\nMUSICBRAINZ_CLIENT_SECRET = \"\"\n\n\nADMINS = []\n\n\nRECAPTCHA_PUBLIC_KEY = \"\"\nRECAPTCHA_PRIVATE_KEY = \"\"\n\n\nNOTIFICATION_RECIPIENTS = []\n\n\nPREFERRED_URL_SCHEME = \"http\"\n","sub_path":"metabrainz/default_config.py","file_name":"default_config.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"455075309","text":"import io\nimport json\n\nfrom transformers.pipelines import pipeline\nfrom flask import Flask, jsonify, request\n\n\napp = Flask(__name__)\n# imagenet_class_index = json.load(open('/imagenet_class_index.json'))\nmodel_name = \"deepset/electra-base-squad2\"\nnlp = pipeline('question-answering', model=model_name, tokenizer=model_name)\n\n\"\"\"\nQA_input = {\n    'question': 'Why is model conversion important?', 'context': 'The option to convert models between FARM and transformers gives freedom to the user and let people easily switch between frameworks.'\n}\nres = nlp(QA_input)\n\"\"\"\ndef get_prediction(QA_input):\n    res = nlp(QA_input)\n    return res\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n    if request.method == 'POST':\n        QA_input = json.loads(request.data)\n        answer = get_prediction(QA_input)\n        return jsonify({'answer': answer['answer']})\n\n# curl --header \"Content-Type: application/json\" --request POST --data '{\"question\":\"What is your name\",\"context\":\"Hello, my name is Rudy\"}' http://localhost:5000/predict\n\n\nif __name__ == '__main__':\n    app.run()\n","sub_path":"ModelServing/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"308388679","text":"import requests\n\nnumber = 1\nwith open('image.txt', 'r') as file:\n\tfor line in file:\n\t\turl = line.strip()  # one image URL per line; strip the trailing newline\n\t\tr = requests.get(url)\n\t\tfilename = str(number) + \".png\"\n\t\twith open(filename, 'wb') as f1:\n\t\t\tf1.write(r.content)\n\t\tnumber = number + 1\n","sub_path":"24.10/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"64884846","text":"import pandas as pd\r\nfrom bs4 import BeautifulSoup\r\n\r\ndf = pd.read_json(\"review_daejeo.json\", encoding='UTF-8')\r\n\r\n\r\ndef parser(body): # Daejeo Ecological Park\r\n    bs = BeautifulSoup(body, 'html.parser')\r\n    user_name = bs.find('span', jstcache='368').text\r\n    date = bs.find('span', jstcache='218').text\r\n    ratingtemp = str(bs).split('개 \\\" ')[0]\r\n    ratingtemp = ratingtemp.split('별표 ')[1]\r\n    review_text = bs.find('span', jstcache='221').text\r\n\r\n    return user_name, date, ratingtemp, review_text\r\n'''\r\n
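# (added note) the alternative parser below targets the Samnak Ecological Park page; it is kept\r\n# inside this comment block for reference -- same logic, different jstcache selectors\r\n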
def parser(body): # Samnak Ecological Park\r\n    bs = BeautifulSoup(body, 'html.parser')\r\n    user_name = bs.find('span', jstcache='1390').text\r\n    date = bs.find('span', jstcache='1240').text\r\n    ratingorg = bs.find('span', class_='section-review-stars').text\r\n    ratingtemp = str(bs).split('개 \\\" ')[0]\r\n    ratingtemp = ratingtemp.split('별표 ')[1]\r\n    review_text = bs.find('span', jstcache='1243').text\r\n\r\n    return user_name, date, ratingtemp, review_text\r\n'''\r\ndf['user_name'], df['date'], df['rating'], df['review_text'] = zip(*df['body'].map(parser))\r\ndel df[\"body\"]\r\ndf = df.applymap(lambda x: x.replace('\\U0001f44d', '').replace('\\U0001f618', ''))\r\n\r\ndf.to_csv('googlemap_daejeo_review.csv', encoding='utf-8-sig')","sub_path":"googlemap/googlemap_review_crawling.py","file_name":"googlemap_review_crawling.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368276132","text":"class MinHeap:\n    def __init__(self,maxsize):\n        self.a = [None]*maxsize\n        self.n = 0\n        self.a[0] = 9999999  # sentinel at index 0 so restore_up stops at the root\n    def insert(self,value):\n        self.n += 1\n        self.a[self.n] = value\n        self.restore_up(self.n)\n    def restore_up(self,i):\n        k = self.a[i]\n        iparent = i//2\n        while self.a[iparent] > k and iparent >= 1: \n            self.a[i] = self.a[iparent]\n            i = iparent\n            iparent = i // 2\n        self.a[i] = k\n    def delete(self):\n        if self.n == 0:\n            print('Heap is empty')\n            return None  # fixed: avoid reading a[1] from an empty heap\n        minValue = self.a[1]\n        self.a[1] = self.a[self.n]\n        self.n -= 1\n        self.restore_down(1)\n        return minValue\n    def restore_down(self,i):\n        k = self.a[i]\n        l = 2*i\n        r = l+1\n        while r <= self.n:\n            if k <= self.a[l] and k <= self.a[r]:\n                self.a[i] = k\n                return\n            else:\n                if self.a[l] < self.a[r]:\n                    self.a[i] = self.a[l]\n                    i = l\n                else:\n                    self.a[i] = self.a[r]\n                    i = r\n                l = 2*i\n                r = l+1\n        \n        #if number of nodes is even\n        if l == self.n and k > self.a[l]:\n            self.a[i] = self.a[l]\n            i = l\n        self.a[i] = k\n    \n\n\n\n#h = Heap(n)\ndef mergeKsorted(lists):\n    l = []\n    tracker_a,tracker_b,tracker_c = 1,1,1\n    h = MinHeap(len(lists)+1)\n    h.insert(lists[0][0])\n    h.insert(lists[1][0])\n    h.insert(lists[2][0])\n\n    m = h.delete()\n    l.append(m)\n    \n    # fixed: each tracker is bounded by its own list's length, not lists[0]'s\n    while (tracker_a <= len(lists[0])-1) and (tracker_b <= len(lists[1])-1) and (tracker_c <= len(lists[2])-1):\n        if m in lists[0]:\n            h.insert(lists[0][tracker_a])\n            m = h.delete()\n            l.append(m)\n            tracker_a += 1\n        elif m in lists[1]:\n            h.insert(lists[1][tracker_b])\n            m = h.delete()\n            l.append(m)\n            tracker_b += 1\n        elif m in lists[2]:\n            h.insert(lists[2][tracker_c])\n            m = h.delete()\n            l.append(m)\n            tracker_c += 1\n    \n    return l\n\nprint(mergeKsorted([[1,3,5,7],[2,4,6,8],[0,9,10,11]]))\n","sub_path":"data structures and algos/HEAPS/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"445261282","text":"from PyQt5.QtGui import QPainter, QColor, QPainterPath, QBrush, QPen\nfrom PyQt5.Qt import QRectF\n\n\nclass Model:\n    def __init__(self):\n        super().__init__()\n        self.borderThickness = 0\n        self.borderColor = QColor()\n        self.fillColor = QColor()\n        self.path = QPainterPath()\n        self.aabb = QRectF()\n        return\n\n    def draw(self, painter, transform):\n        painter.save()\n        painter.setTransform(transform, True)\n\n        painter.setBrush(QBrush(self.fillColor))\n        painter.setPen(QPen(self.borderColor, self.borderThickness))\n        painter.drawPath(self.path)\n\n        painter.restore()\n        return\n\n    @classmethod\n    def create_triangle_model(cls, v1, v2, v3):\n        model = cls()\n        
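# (added usage note, illustrative only): Model.create_triangle_model((0, 0), (50, 0), (25, 40))\n        # builds a closed path v1 -> v2 -> v3 -> v1 plus a tight axis-aligned bounding box.\n        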
model.path.moveTo(v1[0], v1[1])\n model.path.lineTo(v2[0], v2[1])\n model.path.lineTo(v3[0], v3[1])\n model.path.closeSubpath()\n\n left = min(v1[0], v2[0], v3[0])\n right = max(v1[0], v2[0], v3[0])\n top = min(v1[1], v2[1], v3[1])\n bot = max(v1[1], v2[1], v3[1])\n\n model.aabb = QRectF(left, top, right - left, bot - top)\n\n return model\n\n @classmethod\n def create_polynomial_model(cls, vs):\n model = cls()\n model.path.moveTo(vs[len(vs)-1][0], vs[len(vs)-1][1])\n for v in vs:\n model.path.lineTo(v[0], v[1])\n model.path.closeSubpath()\n\n left = 10000\n right = -10000\n top = 10000\n bot = -10000\n for v in vs:\n left = min(left, v[0])\n right = max(right, v[0])\n top = min(top, v[1])\n bot = max(bot, v[1])\n\n model.aabb = QRectF(left, top, right - left, bot - top)\n\n return model\n\n\n # @classmethod\n # def create_polygon_model(cls, vs):\n # model = cls()\n # for v in vs:\n # model.path.moveTo(v[0], v[1])\n # model.path.closeSubpath()\n # return model\n\n\n","sub_path":"Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"260455568","text":"import pickle\nimport torch\nfrom torch import nn\nimport numpy as np\nimport torch.optim as optim\n\n\nclass EncoderDecoder(nn.Module):\n\n def __init__(self, num_input_features, num_output_features):\n\n super().__init__()\n self.w1 = nn.Parameter(torch.empty(num_input_features, num_output_features))\n nn.init.uniform_(self.w1, -0.1, 0.1)\n self.w2 = nn.Parameter(torch.empty(num_output_features, num_input_features))\n nn.init.uniform_(self.w2, -0.1, 0.1)\n \n def encode(self, x):\n return torch.mm(x, self.w1)\n\n def decode(self, x):\n return torch.mm(x, self.w2)\n\n def forward(self, x):\n\n encode_x = self.encode(x)\n decode_x = self.decode(encode_x)\n return decode_x\n\n\n\nif __name__ == \"__main__\":\n\n\n feature_indexer_file = \"feature_indexer.pkl\"\n feature_cache_file = \"features.pkl\"\n print(\"Loading features\")\n feature_indexer = pickle.load(open(feature_indexer_file, \"rb\"))\n feature_cache = pickle.load(open(feature_cache_file, \"rb\"))\n\n\n feature_indexer_file = \"german_feature_indexer.pkl\"\n feature_cache_file = \"german_features.pkl\"\n german_feature_indexer = pickle.load(open(feature_indexer_file, \"rb\"))\n german_feature_cache = pickle.load(open(feature_cache_file, \"rb\"))\n\n\n num_features = 300\n\n model = EncoderDecoder(len(german_feature_indexer), num_features)\n\n lr = 0.01\n optimizer = optim.Adam(model.parameters(), lr)\n loss_fn = torch.nn.SmoothL1Loss()\n\n \n\n total_loss = 0.0\n total_count = 0.0\n\n\n i = 0\n j = 0\n\n while i<1500:\n all_x = []\n \n \n\n \n\n \n if i<=j:\n all_indices = np.array(feature_cache[i])\n for indices in all_indices:\n x = np.zeros((indices.shape[0], len(german_feature_indexer) ))\n for ind1 in range(indices.shape[0]):\n for ind2 in range(indices.shape[1]):\n x[ind1, indices[ind1][ind2]] = 1\n \n all_x.append(x)\n\n if len(all_x) >= 2:\n break\n \n\n \n i += 1\n\n else: \n all_indices = np.array(german_feature_cache[j]) \n for indices in all_indices:\n x = np.zeros((indices.shape[0], len(german_feature_indexer) ))\n for ind1 in range(indices.shape[0]):\n for ind2 in range(indices.shape[1]):\n x[ind1, indices[ind1][ind2]] = 1\n \n all_x.append(x)\n\n if len(all_x) >= 2:\n break \n \n j += 1\n \n all_x = np.stack(all_x)\n all_x = np.reshape(all_x, (-1, len(german_feature_indexer)))\n\n\n if all_x.shape[0] > 10:\n all_x = all_x[0:10, :]\n\n 
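# (added note) all_x is a dense batch of one-hot rows (capped at 10 just above) so the\n        # len(german_feature_indexer)-wide matrix stays small enough for CPU training.\n        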
print(all_x.shape)\n\n        model.zero_grad()\n        x_tensor = torch.from_numpy(all_x).float()\n        recon_x = model(x_tensor)\n        loss = loss_fn(recon_x, x_tensor)\n        print(loss.data)\n        total_loss += loss.data\n        total_count += 1\n        loss.backward()\n        optimizer.step()\n        torch.save(model, \"multi_lingual.embedder\")\n        \n        if(i%10==0):\n            print(\"{} done\".format(i))\n\n    # embedder = torch.load(\"multi_lingual.embedder\")\n    # all_embedded = []\n    # for i in range(len(feature_cache)):\n\n    #     all_indices = np.array(feature_cache[i])\n\n    #     all_x = []\n    #     for indices in all_indices:\n    #         x = np.zeros((indices.shape[0], len(feature_indexer)))\n    #         for ind1 in range(indices.shape[0]):\n    #             for ind2 in range(indices.shape[1]):\n    #                 x[ind1, indices[ind1][ind2]] = 1\n    #         all_x.append(x)\n    #     all_x = np.stack(all_x)\n    #     all_x = np.reshape(all_x, (-1, len(feature_indexer)))\n    #     x_tensor = torch.from_numpy(all_x).float()\n    #     embedded_x = embedder.encode(x_tensor).data\n    #     embedded_x = np.reshape(embedded_x, (-1, 9, 300))\n    #     print(embedded_x.shape)\n    #     all_embedded.append(embedded_x)\n    \n    # pickle.dump(all_embedded, open(\"embedded.cache\", \"wb\"))\n    \n    \n    # print(\"epoch : {}, loss {}\".format(epoch, total_loss/total_count))\n\n    \n    \n\n\n","sub_path":"proj1/mulitlingual_embedder.py","file_name":"mulitlingual_embedder.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"399263228","text":"import copy\n\ns = list(map(int, input().split()))\n\ni = 0\n\nli = []\nli2 = []\n\nwhile i < s[0]:\n    s1 = list(input())\n    li.append(s1)\n    i += 1\nli2 = copy.deepcopy(li)\ni = 0\n\n\nif s[0] > s[1] :\n    min_s = s[1]\nelse :\n    min_s = s[0]\n\nn = 0\nresult = [1]\n\nwhile n < min_s :\n    li = copy.deepcopy(li2)\n\n    for i in li :\n        for j in i :\n            if (i.index(j)+n) < s[1] :\n                if (li.index(i)+n) < s[0] :\n                    if j == i[(i.index(j)+n) % s[1]] :\n                        if j == li[(li.index(i)+n) % s[0]][i.index(j)] :\n                            if j == li[(li.index(i)+n) % s[0]][(i.index(j)+n) % s[1]] :\n                                result.append((n+1)*(n+1)) # so this is where (n+1)*(n+1) always gets appended at the end\n                                li[li.index(i)][i.index(j)] = '확인'\n    n +=1\n\nprint(max(result))\n\n    ","sub_path":"200801/bj_1051.py","file_name":"bj_1051.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"143011877","text":"\"\"\"\nCopyright 2020 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\n__author__ = ['davidharcombe@google.com (David Harcombe)']\n\nfrom googleapiclient.discovery import Resource\nfrom classes.sa360_report_validation.sa360_field_validator import SA360Validator\n\n\nclass PaidAndOrganic(SA360Validator):\n\n  def __init__(self,\n               sa360_service: Resource = None,\n               agency: int = None,\n               advertiser: int = None) -> None:\n    super().__init__(sa360_service, agency, advertiser)\n    self.fields = [\n        \"agency\",\n        \"agencyId\",\n        \"advertiser\",\n        \"advertiserId\",\n        \"account\",\n        \"accountId\",\n        \"accountEngineId\",\n        \"accountType\",\n        \"searchQuery\",\n        \"serpType\",\n        
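# paid/organic metric fields (added grouping comment)\n        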
\"paidClicks\",\n \"organicClicks\",\n \"paidAndOrganicClicks\",\n \"paidImpressions\",\n \"organicQueries\",\n \"paidAndOrganicQueries\",\n \"paidCtr\",\n \"organicCtr\",\n \"paidAndOrganicCtr\",\n \"paidAvgPos\",\n \"organicAvgPos\",\n \"paidCostPerClick\",\n \"date\",\n \"monthStart\",\n \"monthEnd\",\n \"quarterStart\",\n \"quarterEnd\",\n \"weekStart\",\n \"weekEnd\",\n \"yearStart\",\n \"yearEnd\",\n \"campaign\",\n \"campaignId\",\n \"adGroup\",\n \"adGroupId\",\n \"keywordId\",\n \"keywordMatchType\",\n \"keywordText\",\n ]","sub_path":"application/classes/sa360_report_validation/paid_and_organic.py","file_name":"paid_and_organic.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449160924","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\"\"\"This module implements the behavior of `az capi` commands.\"\"\"\n\n# pylint: disable=missing-docstring\n\nimport base64\nimport json\nimport os\nimport platform\nimport random\nimport re\nimport stat\nimport string\nimport subprocess\nimport time\n\nfrom jinja2 import Environment, PackageLoader\nfrom knack.log import get_logger\nfrom knack.prompting import prompt_choice_list, prompt_y_n\nfrom six.moves.urllib.request import urlopen # pylint: disable=import-error\n\nfrom azure.cli.core import get_default_cli\nfrom azure.cli.core.azclierror import FileOperationError\nfrom azure.cli.core.azclierror import InvalidArgumentValueError\nfrom azure.cli.core.azclierror import RequiredArgumentMissingError\nfrom azure.cli.core.azclierror import ResourceNotFoundError\nfrom azure.cli.core.azclierror import UnclassifiedUserFault\nfrom azure.cli.core.azclierror import ValidationError\nfrom azure.cli.core.api import get_config_dir\n\nfrom ._helpers import ssl_context, urlretrieve\n\nfrom ._params import _get_default_install_location\n\n\nlogger = get_logger(__name__) # pylint: disable=invalid-name\n\n\ndef init_environment(cmd):\n check_preqreqs(cmd, install=True)\n # Create a management cluster if needed\n try:\n find_management_cluster_retry(cmd.cli_ctx)\n except ResourceNotFoundError as err:\n if str(err) == \"No CAPZ installation found\":\n _install_capz_components()\n except subprocess.CalledProcessError:\n providers = ['AKS - a managed cluster in Azure',\n 'kind - a local docker-based cluster', \"exit - don't create a management cluster\"]\n prompt = \"\"\"\\\nNo Kubernetes cluster was found using the default configuration.\n\nCluster API needs a \"management cluster\" to run its components.\nLearn more from the Cluster API Book:\n https://cluster-api.sigs.k8s.io/user/concepts.html\n\nWhere should we create a management cluster?\n\"\"\"\n choice_index = prompt_choice_list(prompt, providers)\n random_id = ''.join(random.choices(\n string.ascii_lowercase + string.digits, k=6))\n cluster_name = \"capi-manager-\" + random_id\n if choice_index == 0:\n logger.info(\"AKS management cluster\")\n cmd = [\"az\", \"group\", \"create\", \"-l\",\n \"southcentralus\", \"--name\", cluster_name]\n try:\n output = subprocess.check_output(cmd, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError 
as err:\n raise UnclassifiedUserFault(err)\n cmd = [\"az\", \"aks\", \"create\", \"-g\",\n cluster_name, \"--name\", cluster_name]\n try:\n output = subprocess.check_output(cmd, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n elif choice_index == 1:\n logger.info(\"kind management cluster\")\n # Install kind\n kind_path = \"kind\"\n if not which(\"kind\"):\n kind_path = install_kind(cmd)\n cmd = [kind_path, \"create\", \"cluster\", \"--name\", cluster_name]\n try:\n output = subprocess.check_output(cmd, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n else:\n return\n _install_capz_components()\n\n\ndef _install_capz_components():\n os.environ[\"EXP_MACHINE_POOL\"] = \"true\"\n os.environ[\"EXP_CLUSTER_RESOURCE_SET\"] = \"true\"\n command = [\"clusterctl\", \"init\", \"--infrastructure\", \"azure\"]\n try:\n output = subprocess.check_output(command, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(command), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(\"Can't locate a Kubernetes cluster\") from err\n\n\ndef create_management_cluster(cmd):\n # TODO: add user confirmation\n check_preqreqs(cmd)\n\n command = [\"clusterctl\", \"init\", \"--infrastructure\", \"azure\"]\n try:\n output = subprocess.check_output(command, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(command), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(\"Can't locate a Kubernetes cluster\") from err\n\n\ndef delete_management_cluster(cmd): # pylint: disable=unused-argument\n # TODO: add user confirmation\n command = [\"clusterctl\", \"delete\", \"--all\",\n \"--include-crd\", \"--include-namespace\"]\n try:\n output = subprocess.check_output(command, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(command), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n namespaces = [\n \"capi-kubeadm-bootstrap-system\",\n \"capi-kubeadm-control-plane-system\",\n \"capi-system\",\n \"capi-webhook-system\",\n \"capz-system\",\n \"cert-manager\",\n ]\n command = [\"kubectl\", \"delete\", \"namespace\", \"--ignore-not-found\"] + namespaces\n try:\n output = subprocess.check_output(command, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(command), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n\n\ndef move_management_cluster(cmd):\n raise NotImplementedError\n\n\ndef show_management_cluster(_cmd, yes=False):\n # TODO: check to see if a management cluster is specified in the config\n config = get_default_cli().config\n # Config can also be set by the AZURE_CAPI_KUBECONFIG environment variable.\n kubeconfig = config.get(\"capi\", \"kubeconfig\",\n fallback=os.environ.get(\"KUBECONFIG\"))\n if not kubeconfig:\n raise InvalidArgumentValueError(\"no kubeconfig\")\n # make a $HOME/.azure/capi directory for storing cluster configurations\n path = os.path.join(get_config_dir(), \"capi\")\n if not os.path.exists(path):\n os.makedirs(path)\n # TODO: if not\n command = [\"kubectl\", \"config\", \"get-contexts\",\n \"--no-headers\", \"--output\", \"name\"]\n try:\n output = subprocess.check_output(command, universal_newlines=True)\n logger.info(\"%s 
returned:\\n%s\", \" \".join(command), output)\n contexts = output.splitlines()\n logger.info(contexts)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n\n msg = path + \"ok\"\n if not yes and prompt_y_n(msg, default=\"n\"):\n logger.info(\"yes\")\n # TODO: echo details of the management cluster in all output formats\n\n\ndef update_management_cluster(cmd):\n # Check for local prerequisites\n check_preqreqs(cmd)\n cmd = [\n \"clusterctl\",\n \"upgrade\",\n \"apply\",\n \"--management-group\",\n \"capi-system/cluster-api\",\n \"--contract\",\n \"v1alpha3\",\n ]\n try:\n output = subprocess.check_output(cmd, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n\n\ndef create_workload_cluster( # pylint: disable=unused-argument,too-many-arguments,too-many-locals\n cmd,\n resource_group_name,\n capi_name,\n location=None,\n control_plane_machine_type=os.environ.get(\n \"AZURE_CONTROL_PLANE_MACHINE_TYPE\"),\n control_plane_machine_count=os.environ.get(\n \"AZURE_CONTROL_PLANE_MACHINE_COUNT\", 3),\n node_machine_type=os.environ.get(\"AZURE_NODE_MACHINE_TYPE\"),\n node_machine_count=os.environ.get(\"AZURE_NODE_MACHINE_COUNT\", 3),\n kubernetes_version=os.environ.get(\"AZURE_KUBERNETES_VERSION\", \"1.20.2\"),\n # azure_cloud=os.environ.get(\"AZURE_ENVIRONMENT\", \"AzurePublicCloud\"),\n subscription_id=os.environ.get(\"AZURE_SUBSCRIPTION_ID\"),\n ssh_public_key=os.environ.get(\"AZURE_SSH_PUBLIC_KEY_B64\", \"\"),\n vnet_name=None,\n machinepool=False,\n ephemeral_disks=False,\n windows=False,\n yes=False):\n # Generate the cluster configuration\n env = Environment(loader=PackageLoader(\n __name__, \"templates\"), auto_reload=False)\n logger.info(\"Available templates: %s\", env.list_templates())\n template = env.get_template(\"base.jinja\")\n\n args = {\n \"AZURE_CONTROL_PLANE_MACHINE_TYPE\": control_plane_machine_type,\n \"AZURE_LOCATION\": location,\n \"AZURE_NODE_MACHINE_TYPE\": node_machine_type,\n \"AZURE_RESOURCE_GROUP\": resource_group_name,\n \"AZURE_SUBSCRIPTION_ID\": subscription_id,\n \"AZURE_SSH_PUBLIC_KEY_B64\": ssh_public_key,\n \"AZURE_VNET_NAME\": vnet_name,\n \"CLUSTER_NAME\": capi_name,\n \"KUBERNETES_VERSION\": kubernetes_version,\n \"EPHEMERAL\": ephemeral_disks,\n \"WINDOWS\": windows,\n \"NODEPOOL_TYPE\": \"machinepool\" if machinepool else \"machinedeployment\",\n }\n manifest = template.render(args)\n\n # TODO: Some CAPZ options need to be set as env vars, not clusterctl arguments.\n # os.environ.update(\n # {\n # \"AZURE_CONTROL_PLANE_MACHINE_TYPE\": control_plane_machine_type,\n # \"AZURE_NODE_MACHINE_TYPE\": node_machine_type,\n # \"AZURE_LOCATION\": location,\n # \"AZURE_ENVIRONMENT\": azure_cloud,\n # }\n # )\n filename = capi_name + \".yaml\"\n with open(filename, \"w\") as manifest_file:\n manifest_file.write(manifest)\n logger.warning(\"wrote manifest file to %s\", filename)\n\n msg = 'Do you want to create this Kubernetes cluster \"{}\" in the Azure resource group \"{}\"?'.format(\n capi_name, resource_group_name)\n if yes or prompt_y_n(msg, default=\"n\"):\n init_environment(cmd)\n\n # Prompt to create resource group if it doesn't exist\n from azure.cli.core.commands.client_factory import get_mgmt_service_client\n from azure.cli.core.profiles import ResourceType\n\n resource_client = get_mgmt_service_client(\n cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)\n if not 
resource_client.resource_groups.check_existence(resource_group_name):\n logger.warning(\"Couldn't find the specified resource group.\")\n if not location:\n raise RequiredArgumentMissingError(\n 'Please specify a location so a resource group can be created.')\n create = yes\n if not create:\n msg = 'Do you want to create a new resource group named \"{}\" in Azure\\'s {} region?'.format(\n resource_group_name, location)\n create = prompt_y_n(msg, default=\"n\")\n if create:\n rg_model = resource_client.models().ResourceGroup\n # TODO: add tags to resource group?\n parameters = rg_model(location=location)\n output = resource_client.resource_groups.create_or_update(\n resource_group_name, parameters)\n logger.info(output)\n logger.warning(\"Created resource group %s in %s.\", resource_group_name, location)\n # Check for general prerequisites\n # init_environment(cmd)\n # Identify or create a Kubernetes v1.16+ management cluster\n find_management_cluster_retry(cmd.cli_ctx)\n\n # Apply the cluster configuration\n cmd = [\"kubectl\", \"apply\", \"-f\", filename]\n try:\n output = subprocess.check_output(cmd, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n # TODO: create RG for user with AAD Pod ID scoped to it\n\n\ndef delete_workload_cluster(cmd):\n raise NotImplementedError\n\n\ndef list_workload_clusters(cmd):\n cmd = [\"kubectl\", \"get\", \"clusters\", \"-o\", \"json\"]\n try:\n output = subprocess.check_output(cmd, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(cmd), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n return json.loads(output)\n\n\ndef show_workload_cluster(cmd, name): # pylint: disable=unused-argument\n # TODO: --output=table should print the output of `clusterctl describe` directly.\n # command = [\"clusterctl\", \"describe\", \"cluster\", name]\n command = [\"kubectl\", \"get\", \"cluster\", name, \"--output\", \"json\"]\n try:\n output = subprocess.check_output(command, stderr=subprocess.STDOUT, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(command), output)\n except subprocess.CalledProcessError as err:\n raise UnclassifiedUserFault(err)\n return json.loads(output)\n\n\ndef update_workload_cluster(cmd):\n raise NotImplementedError\n\n\ndef check_preqreqs(cmd, install=False):\n # Install kubectl\n if not which(\"kubectl\") and install:\n install_kubectl(cmd)\n\n # Install clusterctl\n if not which(\"clusterctl\") and install:\n install_clusterctl(cmd)\n\n # Check for required environment variables\n # TODO: remove this when AAD Pod Identity becomes the default\n for var in [\"AZURE_CLIENT_ID\", \"AZURE_CLIENT_SECRET\", \"AZURE_SUBSCRIPTION_ID\", \"AZURE_TENANT_ID\"]:\n check_environment_var(var)\n\n\ndef check_environment_var(var):\n var_b64 = var + \"_B64\"\n val = os.environ.get(var_b64)\n if val:\n logger.info(\"Found environment variable %s\", var_b64)\n else:\n try:\n val = os.environ[var]\n except KeyError as err:\n raise RequiredArgumentMissingError(\"Required environment variable {} was not found.\".format(err))\n # Set the base64-encoded variable as a convenience\n val = base64.b64encode(val.encode(\"utf-8\")).decode(\"ascii\")\n os.environ[var_b64] = val\n logger.info(\"Set environment variable %s from %s\", var_b64, var)\n\n\ndef find_management_cluster_retry(cli_ctx, delay=3):\n hook = cli_ctx.get_progress_controller(True)\n 
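# (added note) the retry loop below sleeps delay*(1+i) seconds between attempts, so the\n    # CAPZ controller gets progressively longer grace periods before we report failure.\n    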
hook.add(message='Waiting for CAPI components to be running',\n value=0, total_val=1.0)\n logger.info('Waiting for CAPI components to be running')\n for i in range(0, 10):\n hook.add(message='Waiting for CAPI components to be running',\n value=0.1 * i, total_val=1.0)\n try:\n find_management_cluster()\n break\n except ResourceNotFoundError:\n time.sleep(delay + delay * i)\n else:\n return False\n hook.add(message='CAPI components are running', value=1.0, total_val=1.0)\n logger.info('CAPI components are running')\n return True\n\n\ndef find_management_cluster():\n cmd = [\"kubectl\", \"cluster-info\"]\n match = check_cmd(cmd, r\"Kubernetes control plane.*?is running\")\n if match is None:\n raise ResourceNotFoundError(\"No accessible Kubernetes cluster found\")\n cmd = [\"kubectl\", \"get\", \"pods\", \"--namespace\", \"capz-system\"]\n try:\n match = check_cmd(cmd, r\"capz-controller-manager-.+?Running\")\n if match is None:\n raise ResourceNotFoundError(\"No CAPZ installation found\")\n except subprocess.CalledProcessError as err:\n logger.error(err)\n cmd = [\"kubectl\", \"get\", \"pods\", \"--namespace\", \"capi-webhook-system\"]\n try:\n match = check_cmd(cmd, r\"capz-controller-manager-.+?Running\")\n if match is None:\n raise ResourceNotFoundError(\"No CAPZ installation found\")\n except subprocess.CalledProcessError as err:\n logger.error(err)\n\n\ndef check_cmd(command, regexp=None):\n output = subprocess.check_output(command, universal_newlines=True)\n logger.info(\"%s returned:\\n%s\", \" \".join(command), output)\n if regexp is not None:\n return re.search(regexp, output)\n return False\n\n\ndef which(binary):\n path_var = os.getenv(\"PATH\")\n\n if platform.system() == \"Windows\":\n binary += \".exe\"\n parts = path_var.split(\";\")\n else:\n parts = path_var.split(\":\")\n\n for part in parts:\n bin_path = os.path.join(part, binary)\n if os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):\n return bin_path\n\n return None\n\n\ndef install_clusterctl(_cmd, client_version=\"latest\", install_location=None, source_url=None):\n \"\"\"\n Install clusterctl, a command-line interface for Cluster API Kubernetes clusters.\n \"\"\"\n\n if not source_url:\n source_url = \"https://github.com/kubernetes-sigs/cluster-api/releases/\"\n # TODO: mirror clusterctl binary to Azure China cloud--see install_kubectl().\n\n if client_version != \"latest\":\n source_url += \"tags/\"\n source_url += \"{}/download/clusterctl-{}-amd64\"\n\n file_url = \"\"\n system = platform.system()\n if system in (\"Darwin\", \"Linux\"):\n file_url = source_url.format(client_version, system.lower())\n else: # TODO: support Windows someday?\n raise ValidationError(\n 'The clusterctl binary is not available for \"{}\"'.format(system))\n\n # ensure installation directory exists\n if install_location is None:\n install_location = _get_default_install_location(\"clusterctl\")\n install_dir, cli = os.path.dirname(install_location), os.path.basename(\n install_location\n )\n if not os.path.exists(install_dir):\n os.makedirs(install_dir)\n\n logger.warning('Downloading client to \"%s\" from \"%s\"',\n install_location, file_url)\n try:\n urlretrieve(file_url, install_location)\n perms = (os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)\n os.chmod(install_location, perms)\n except IOError as ex:\n err_msg = \"Connection error while attempting to download client ({})\".format(ex)\n raise FileOperationError(err_msg)\n\n logger.warning(\n \"Please ensure that %s is in your search PATH, so 
the `%s` command can be found.\",\n install_dir,\n cli,\n )\n\n\ndef install_kind(_cmd, client_version=\"v0.10.0\", install_location=None, source_url=None):\n \"\"\"\n Install kind, a container-based Kubernetes environment for development and testing.\n \"\"\"\n\n if not source_url:\n source_url = \"https://kind.sigs.k8s.io/dl/{}/kind-{}-amd64\"\n\n # ensure installation directory exists\n if install_location is None:\n install_location = _get_default_install_location(\"kind\")\n install_dir, cli = os.path.dirname(install_location), os.path.basename(\n install_location\n )\n if not os.path.exists(install_dir):\n os.makedirs(install_dir)\n\n file_url = \"\"\n system = platform.system()\n if system == \"Windows\":\n file_url = source_url.format(client_version, \"windows\")\n elif system == \"Linux\":\n file_url = source_url.format(client_version, \"linux\")\n elif system == \"Darwin\":\n file_url = source_url.format(client_version, \"darwin\")\n else:\n raise InvalidArgumentValueError('System \"{}\" is not supported by kind.'.format(system))\n\n logger.warning('Downloading client to \"%s\" from \"%s\"',\n install_location, file_url)\n try:\n urlretrieve(file_url, install_location)\n os.chmod(\n install_location,\n os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH\n )\n except IOError as ex:\n raise FileOperationError(\n \"Connection error while attempting to download client ({})\".format(\n ex)\n )\n\n if system == \"Windows\":\n # be verbose, as the install_location likely not in Windows's search PATHs\n env_paths = os.environ[\"PATH\"].split(\";\")\n found = next(\n (x for x in env_paths if x.lower().rstrip(\"\\\\\") == install_dir.lower()),\n None,\n )\n if not found:\n # pylint: disable=logging-format-interpolation\n logger.warning(\n 'Please add \"{0}\" to your search PATH so the `{1}` can be found. 2 options: \\n'\n ' 1. Run \"set PATH=%PATH%;{0}\" or \"$env:path += \\'{0}\\'\" for PowerShell. '\n \"This is good for the current command session.\\n\"\n \" 2. Update system PATH environment variable by following \"\n '\"Control Panel->System->Advanced->Environment Variables\", and re-open the command window. 
'\n \"You only need to do it once\".format(install_dir, cli)\n )\n else:\n logger.warning(\n \"Please ensure that %s is in your search PATH, so the `%s` command can be found.\",\n install_dir,\n cli,\n )\n return install_location\n\n\ndef install_kubectl(cmd, client_version=\"latest\", install_location=None, source_url=None):\n \"\"\"\n Install kubectl, a command-line interface for Kubernetes clusters.\n \"\"\"\n\n if not source_url:\n source_url = \"https://storage.googleapis.com/kubernetes-release/release\"\n cloud_name = cmd.cli_ctx.cloud.name\n if cloud_name.lower() == \"azurechinacloud\":\n source_url = \"https://mirror.azure.cn/kubernetes/kubectl\"\n\n if client_version == \"latest\":\n context = ssl_context()\n version = urlopen(source_url + \"/stable.txt\", context=context).read()\n client_version = version.decode(\"UTF-8\").strip()\n else:\n client_version = \"v%s\" % client_version\n\n file_url = \"\"\n system = platform.system()\n base_url = source_url + \"/{}/bin/{}/amd64/{}\"\n\n # ensure installation directory exists\n if install_location is None:\n install_location = _get_default_install_location(\"kubectl\")\n install_dir, cli = os.path.dirname(install_location), os.path.basename(\n install_location\n )\n if not os.path.exists(install_dir):\n os.makedirs(install_dir)\n\n if system == \"Windows\":\n file_url = base_url.format(client_version, \"windows\", \"kubectl.exe\")\n elif system == \"Linux\":\n # TODO: Support ARM CPU here\n file_url = base_url.format(client_version, \"linux\", \"kubectl\")\n elif system == \"Darwin\":\n file_url = base_url.format(client_version, \"darwin\", \"kubectl\")\n else:\n raise InvalidArgumentValueError(\n \"Proxy server ({}) does not exist on the cluster.\".format(system)\n )\n\n logger.warning('Downloading client to \"%s\" from \"%s\"',\n install_location, file_url)\n try:\n urlretrieve(file_url, install_location)\n os.chmod(\n install_location,\n os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH\n )\n except IOError as ex:\n err_msg = \"Connection error while attempting to download client ({})\".format(ex)\n raise FileOperationError(err_msg) from ex\n\n if system == \"Windows\":\n # be verbose, as the install_location is likely not in Windows's search PATHs\n env_paths = os.environ[\"PATH\"].split(\";\")\n found = next(\n (x for x in env_paths if x.lower().rstrip(\"\\\\\") == install_dir.lower()),\n None,\n )\n if not found:\n # pylint: disable=logging-format-interpolation\n logger.warning(\n 'Please add \"{0}\" to your search PATH so the `{1}` can be found. 2 options: \\n'\n ' 1. Run \"set PATH=%PATH%;{0}\" or \"$env:path += \\'{0}\\'\" for PowerShell. '\n \"This is good for the current command session.\\n\"\n \" 2. Update system PATH environment variable by following \"\n '\"Control Panel->System->Advanced->Environment Variables\", and re-open the command window. 
'\n \"You only need to do it once\".format(install_dir, cli)\n )\n else:\n logger.warning(\n \"Please ensure that %s is in your search PATH, so the `%s` command can be found.\",\n install_dir,\n cli,\n )\n","sub_path":"src/capi/azext_capi/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":24272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"363511290","text":"# Method 1:\n\n# import cv2\n# img = cv2.imread(\"img/img1.jpg\")\n# cv2.imshow(\"output\",img)\n# gray_img =cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n# cv2.imshow(\"gray image\", gray_img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()\n\n# Method 2:\n\nimport cv2\nimg = cv2.imread(\"img/img1.jpg\",0)\ncv2.imshow(\"gray img\",img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"opencv/tuts/tut_4_gray_scaled_black_white_img.py","file_name":"tut_4_gray_scaled_black_white_img.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"438148001","text":"#!/usr/bin/env python3\n\"\"\"module\"\"\"\nimport numpy as np\n\n\nclass DeepNeuralNetwork:\n \"\"\"\n class DeepNeuralNetwork that defines\n a deep neural network performing binary\n classification\n \"\"\"\n def __init__(self, nx, layers):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx <= 0:\n raise ValueError(\"nx must be a positive integer\")\n if type(layers) is not list or len(layers) == 0:\n raise TypeError(\"layers must be a list of positive integers\")\n\n self.__L = len(layers)\n self.__cache = {}\n self.__weights = {}\n\n for p_i in range(len(layers)):\n if (type(layers[p_i]) is not int or layers[p_i] < 1):\n raise TypeError(\"layers must be a list of positive integers\")\n key_W = \"W{}\".format(p_i + 1)\n key_b = \"b{}\".format(p_i + 1)\n if p_i == 0:\n w = np.random.randn(layers[p_i], nx) * np.sqrt(2 / nx)\n self.weights[key_W] = w\n else:\n heteal_2 = np.sqrt(2 / layers[p_i - 1])\n w = np.random.randn(layers[p_i], layers[p_i - 1]) * heteal_2\n self.weights[key_W] = w\n b = np.zeros((layers[p_i], 1))\n self.weights[key_b] = b\n\n @property\n def L(self):\n \"\"\"getter L1\"\"\"\n return self.__L\n\n @property\n def cache(self):\n \"\"\"getter cache\"\"\"\n return self.__cache\n\n @property\n def weights(self):\n \"\"\"getter weights\"\"\"\n return self.__weights\n\n def forward_prop(self, X):\n \"\"\"Calculates the forward propagation of the neural network\"\"\"\n self.__cache['A0'] = X\n for l in range(self.__L):\n z1 = np.matmul(self.__weights['W' + str(l + 1)],\n self.__cache['A' + str(l)])\n Z = z1 + self.__weights['b' + str(l + 1)]\n self.__cache['A' + str(l + 1)] = 1 / (1 + np.exp(-Z))\n\n return(self.__cache['A' + str(l + 1)], self.__cache)\n","sub_path":"supervised_learning/0x01-classification/18-deep_neural_network.py","file_name":"18-deep_neural_network.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"514652160","text":"# coding=utf-8\n\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.context_processors import csrf\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\n\n# from django.views.generic import View, TemplateView\n# from 
django.utils.decorators import method_decorator\n\nfrom .models import Project, Reply\nfrom .forms import CreateProjectForm, FilterStatusForm, AddProjectAttachmentForm, PerformersReplyForm\n\n\ndef all_projects(request):\n    projects = Project.objects.filter(status='N').filter(privacy_type='PU').order_by('-pub_date')\n    \"\"\"if request.method == 'POST':\n        status_form = FilterStatusForm(request.POST)\n        if status_form.is_valid():\n            if status_form.cleaned_data['status_type'] == '*':\n                projects = Project.objects.all().filter(privacy_type='PU')\n            else:\n                projects = Project.objects.filter(\n                    status=status_form.cleaned_data['status_type']\n                ).filter(\n                    privacy_type='PU')\n        args['filter_status_form'] = FilterStatusForm()\"\"\"\n    args = {}\n    args.update(csrf(request))\n    args['projects'] = projects\n    \n    return render_to_response('projects_list.html', args)\n\ndef my_projects(request):\n    me = request.user\n    projects = Project.objects.filter(author_id=me.id).order_by('-pub_date')\n    args = {}\n    args['projects'] = projects\n    \n    return render_to_response('my_projects.html', args)\n\n@login_required\ndef get_project(request, project_id):\n    me = request.user\n    project = Project.objects.get(project_id=project_id)\n    skills = project.skill_req.all()\n    performers = project.performers.all()\n    replies = Reply.objects.filter(project=project_id)\n    args = {}\n    args['project'] = project\n    args['skills'] = skills\n    args['performers'] = performers\n    args['replies'] = replies\n    if User.objects.get(username=me).id == project.author_id:\n        args['is_mine'] = True\n    return render_to_response('get_project.html', args)\n\n\n@login_required\ndef create_project(request):\n    me = request.user\n    if request.method == 'POST':\n        tmp_form = CreateProjectForm(request.POST)\n        if tmp_form.is_valid():\n            form = tmp_form.save(commit=False)\n            form.author = me\n            form.save()\n            tmp_form.save_m2m()\n            return HttpResponseRedirect(reverse('projects:getProject', args=[form.project_id]))\n        else:\n            errors = tmp_form.errors\n            args = {}\n            args['me'] = me\n            args['errors'] = errors\n            return render_to_response('create_project.html', args)\n    else:\n        form = CreateProjectForm()\n        args = {}\n        args.update(csrf(request))\n        args['me'] = me\n        args['project_create_form'] = form\n        return render_to_response('create_project.html', args)\n\n\ndef add_attachement_to_project(request, project_id):\n    project = Project.objects.get(project_id=project_id)\n    if request.method == 'POST':\n        tmp_form = AddProjectAttachmentForm(request.POST, request.FILES)\n        if tmp_form.is_valid():\n            form = tmp_form.save(commit=False)\n            form.project = project\n            form.save()\n            return HttpResponseRedirect(reverse('projects:getProject', args=[project_id]))\n        else:\n            errors = tmp_form.errors\n            args = {}\n            args['errors'] = errors\n            return HttpResponse('Error in the submitted fields')  # fixed: the response must be returned\n    else:\n        form = AddProjectAttachmentForm()\n        args = {}\n        args.update(csrf(request))\n        args['add_project_attachment_form'] = form\n        return render_to_response('add_attachement_to_project.html', args)  # fixed: the response must be returned\n\n\n@login_required\ndef edit_project(request, project_id):\n    me = request.user\n    me_id = User.objects.get(username=me).id\n    try:\n        project = Project.objects.filter(author=me_id).get(project_id=project_id)\n    except ObjectDoesNotExist:\n        raise Http404('You can only edit your own projects')\n\n    # skills = project.skill_req.all()\n    # performance = project.performers.all()\n    if request.method == 'POST':\n        form = CreateProjectForm(request.POST, instance=project)\n        if form.is_valid():\n            form.save()\n            
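# (added note) send the author back to the public project list after a successful edit\n            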
return HttpResponseRedirect(reverse('projects:index'))\n        else:\n            errors = form.errors\n            args = {}\n            args['me'] = me\n            args['project'] = project\n            args['errors'] = errors\n            return render_to_response('edit_project.html', args)\n    else:\n        form = CreateProjectForm(instance=project)\n        args = {}\n        args.update(csrf(request))\n        args['me'] = me\n        args['project'] = project\n        args['project_edit_form'] = form\n        return render_to_response('edit_project.html', args)\n\n\n@login_required\ndef make_reply(request, project_id):\n    project = Project.objects.get(project_id=project_id)\n    me = request.user\n    try:\n        # If user already replied on this project exception wouldn't raise and you get HttpResponse.\n        Reply.objects.filter(project_id=project_id).get(author_id=me)\n        return HttpResponse('You have already replied to this project')\n    except ObjectDoesNotExist:\n        if request.method == 'POST':\n            tmp_form = PerformersReplyForm(request.POST, request.FILES)\n            if tmp_form.is_valid():\n                form = tmp_form.save(commit=False)\n                form.project = project\n                form.author = me\n                form.save()\n                return HttpResponseRedirect(reverse('projects:getProject', args=[project_id]))\n        # fixed: fall through here on GET or on an invalid POST so a response is always returned\n        args = {}\n        args.update(csrf(request))\n        args['performers_reply_form'] = PerformersReplyForm()\n        args['project'] = project\n        return render_to_response('make_reply.html', args)\n\n\ndef reply_details(request, project_id, reply_id):\n    try:\n        reply = Reply.objects.get(id=reply_id)\n    except ObjectDoesNotExist:\n        raise Http404('No reply with this id exists')\n    args = {}\n    args['reply'] = reply\n    return render_to_response('reply_details.html', args)\n\n\n'''\nclass ProtectedViev(View):\n\n    @method_decorator(login_required)\n    def dispatch(self, *args, **kwargs):\n        return super(ProtectedViev, self).dispatch(*args, **kwargs)\n\n\nclass ProjectView(ProtectedViev):\n\n    def get(self, request, project_id):\n        project = Project.objects.get(project_id=project_id)\n        args = {}\n        args['project'] = project\n        return render_to_response('get_project.html', args)\n'''\n","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"5766477","text":"from architecture.decoder.cnn_decoder_block import CnnDecoderBlock\r\nimport torch\r\nimport torch.nn as nn\r\nfrom modules.view import View\r\nfrom modules.map_tanh_zero_one import MapTanhZeroOne\r\n\r\n\r\nclass Decoder(nn.Module):\r\n    def __init__(self, latent_size: int):\r\n        super().__init__()\r\n        self.__sequential_blocks = [\r\n            nn.Linear(latent_size, 256*8*8),\r\n            nn.ReLU(True),\r\n            View(-1, 256, 8, 8),\r\n            CnnDecoderBlock(256, 128),\r\n            CnnDecoderBlock(128, 64),\r\n            nn.ConvTranspose2d(64, 3, 3, 1, 1),\r\n            nn.Tanh(),\r\n            MapTanhZeroOne()\r\n        ]\r\n        self.main = nn.Sequential(*self.__sequential_blocks)\r\n\r\n    def forward(self, input_latent: torch.Tensor) -> torch.Tensor:\r\n        decoded_images = self.main(input_latent)\r\n        return decoded_images\r\n","sub_path":"src/architecture/decoder/svhn_decoder.py","file_name":"svhn_decoder.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"626199842","text":"#!/usr/bin/env python3\n\n\"\"\"encode/decode base58 in the same way that Bitcoin does\"\"\"\n\nimport math\n\n__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'\n__b58base = len(__b58chars)\n\n__usage = \"\"\"Usage: python base58.py <hex_public_key>\n\nthis will return the bitcoin address for the associated public key\"\"\"\n\n
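# Quick sanity check (added, illustrative): a leading zero byte becomes a leading '1',\n# so b58encode(b'\\x00\\x01') returns '12'.\n\n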
key\"\"\"\n\ndef b58encode(v):\n \"\"\"encode v, which is a string of bytes, to base58\"\"\"\n\n long_value = 0\n for (i, c) in enumerate(v[::-1]):\n # print(c, i, 8*i, c << (8*i))\n long_value += c << (8*i) # 2x speedup vs. exponentiation\n\n result = ''\n while long_value >= __b58base:\n div, mod = divmod(long_value, __b58base)\n result = __b58chars[mod] + result\n long_value = div\n result = __b58chars[long_value] + result\n\n # Bitcoin does a little leading-zero-compression:\n # leading 0-bytes in the input become leading-1s\n nPad = 0\n for c in v:\n if c == 0: \n nPad += 1\n else: \n break\n\n return (__b58chars[0]*nPad) + result\n\ndef b58decode(v, length):\n \"\"\"decode v into a string of len bytes\"\"\"\n long_value = 0\n for (i, c) in enumerate(v[::-1]):\n long_value += __b58chars.find(c) * (__b58base**i)\n\n result = ''\n while long_value >= 256:\n div, mod = divmod(long_value, 256)\n result = chr(mod) + result\n long_value = div\n result = chr(long_value) + result\n\n nPad = 0\n for c in v:\n if c == __b58chars[0]: \n nPad += 1\n else: \n break\n\n result = chr(0)*nPad + result\n if length is not None and len(result) != length:\n return None\n return result\n\ntry:\n # Python Crypto library is at: http://www.dlitz.net/software/pycrypto/\n # Needed for RIPEMD160 hash function, used to compute\n # Bitcoin addresses from internal public keys.\n import hashlib\n import binascii\n have_crypto = True\nexcept ImportError:\n have_crypto = False\n\ndef hash_160(public_key):\n if not have_crypto:\n raise ImportError\n pubUnHexed = binascii.unhexlify(bytes(public_key, 'ascii'))\n h1 = hashlib.sha256(pubUnHexed).digest()\n ripemd = hashlib.new('ripemd160')\n ripemd.update(h1)\n h2 = ripemd.digest()\n return h2\n\ndef public_key_to_bc_address(public_key):\n if not have_crypto:\n raise ImportError\n h160 = hash_160(public_key)\n return hash_160_to_bc_address(h160)\n\ndef hash_160_to_bc_address(h160):\n if not have_crypto:\n raise ImportError\n vh160 = b\"\\x00\"+h160 # \\x00 is version 0\n h3 = hashlib.sha256(hashlib.sha256(vh160).digest()).digest()\n addr=vh160+h3[0:4]\n return b58encode(addr)\n\ndef bc_address_to_hash_160(addr):\n bytes = b58decode(addr, 25)\n return bytes[1:21]\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) is not 2 :\n print(__usage)\n else :\n pubKey = sys.argv[1]\n print(public_key_to_bc_address(pubKey))\n","sub_path":"base58.py","file_name":"base58.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488351050","text":"import os\nfrom threading import Thread\nimport numpy as np\nimport pandas as pd\nimport docx\nfrom docx import Document # 导入库\nfrom win32com import client as wc\nfrom win32com.client import Dispatch, constants\nfrom docx import Document\nfrom decompress_fold import decompress_folder\n\nbase_url = \"F:\\B074\"\nunzip_url = r\"F:\\B074_unzip\\\\\" # 一次解压后的新文件保存的路径\nunzip_high_url = \"F:\\B074_originalI_file\" # 二次解压后的新文件保存的路径\ndirs = os.listdir(base_url) #原始订单名列表\nsrcond_dirs = os.listdir(unzip_url)\n\ncustomer_code_list = [] #客户代码\nfor filename in dirs:\n customer_code = filename.split('.')[0][2:5]\n customer_code_list.append(customer_code)\n\none_zip_path = [] #原始文件名列表\noriginal_order_list = [] #要提取的文件名列表\nfor one_zip in srcond_dirs:\n for dir in dirs:\n if one_zip.split(' ')[-1] ==dir:\n one_zip_path.append(decompress_folder(one_zip))\n\nfull_path=''\nfor file_path in one_zip_path:\n full_path=os.path.join(unzip_url,file_path)\n 
original_order_list.append(os.listdir(file_path)[-1])\ncontent_list=[] # extracted items\nfor doc in original_order_list:\n    with open(os.path.join(full_path,doc)) as f:\n        w = wc.Dispatch('Word.Application')\n        document = w.Documents(unzip_high_url + str(doc)) # open the document\n        tables = document.tables # the document's table collection\n        table = tables[0] # the first table in the document\n        WorksRequirements_0 = table.cell(5, 0).text # engineering/technical requirements; cell(5, 0) is row 6, column 1, and so on\n        WorksRequirements_1 = table.cell(5, 1).text # engineering/technical requirements content\n        specialThings_0 = table.cell(6, 0).text # special notes\n        specialThings_1 = table.cell(6, 1).text # special notes content\n        content = {WorksRequirements_0: WorksRequirements_1, specialThings_0: specialThings_1}\n        content_dict = {doc: content}\n        content_list.append(content_dict)\n\ndata_df = pd.DataFrame(\n    np.arange(6315).reshape(1263, 5),\n    # columns=[\n    #     '客户代码',\n    #     '原始订单名',\n    #     '原始文件名',\n    #     '要提取的文件名',\n    #     '提取项',\n    # ]\n)\nprint(one_zip_path)\nprint(original_order_list)\nprint(content_list)\ndata_df.insert(0,'客户代码',customer_code_list)\ndata_df.insert(1,'原始订单名',dirs)\ndata_df.insert(2,'原始文件名',one_zip_path)\ndata_df.insert(3,'要提取的文件名',original_order_list)\ndata_df.insert(4,'提取项',content_list)\nprint(data_df)\nwriter = pd.ExcelWriter('BO74_data.xlsx')\ndata_df.to_excel(writer, float_format='%.5f')\nwriter.save()","sub_path":"sort_file.py","file_name":"sort_file.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"216145185","text":"import subprocess\n\nfrom django.shortcuts import render\nfrom homepage.forms import CowForm\nfrom homepage.models import Cow\n\ndef index(request):\n    if request.method == \"POST\":\n        form = CowForm(request.POST)\n\n        if form.is_valid():\n            data = form.cleaned_data\n            text = data.get(\"text\")\n            Cow.objects.create(\n                text = text\n            )\n            cowsaid = subprocess.run(f'cowsay \"{text}\"', capture_output=True, shell=True).stdout.decode(\"utf-8\").strip()\n            form = CowForm()\n            return render(request, \"index.html\", {\"form\": form, \"subprocess\": cowsaid})\n    form = CowForm()\n    return render(request, \"index.html\", {\"form\": form})\n\ndef history(request):\n    cows = Cow.objects.all().order_by(\"-id\")[:10]\n    return render(request, \"history.html\", {\"cowsays\": cows})","sub_path":"homepage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"21339092","text":"#!/usr/bin/env python3\nimport sys\nimport os\nimport collections\nimport subprocess\nfrom textwrap import dedent as _dd\nfrom psycopg2 import OperationalError, ProgrammingError, connect\n\nfrom utils import env\nfrom git_odoo import _repos, _get_version_from_db, App as _git_odoo_app\n\n\n########################\n#  decorators stuff    #\n########################\n\nCALLABLE_FROM_SHELL = set()\nSHELL_END_HOOK = set()\nSHELL_DIFFERED_COMMANDS_FILE = f\"{env.AP}/differed_commands.txt\"\ndiffered_sh_run_new_batch = True\n\n\ndef call_from_shell(func):\n    # decorator for functions that are meant to be called directly from the shell\n    CALLABLE_FROM_SHELL.add(func.__name__)\n    return func\n\n\ndef shell_end_hook(func):\n    # decorator for functions that need to call a shell\n    # command AFTER the python script exits\n    # the decorated app should call `differed_sh_run`\n    SHELL_END_HOOK.add(func.__name__)\n    return func\n\n\ndef differed_sh_run(cmd):\n    # prepare a command to be executed after the end of the python script\n    # can only work in functions decorated with 
`shell_end_hook`\n # or called by functions decorated with `shell_end_hook`\n global differed_sh_run_new_batch\n write_mode = \"w\" if differed_sh_run_new_batch else \"a\"\n with open(SHELL_DIFFERED_COMMANDS_FILE, write_mode) as f:\n f.write(cmd + \"\\n\")\n differed_sh_run_new_batch = False\n\n\n#####################################\n# Custom classes and exceptions #\n#####################################\n\n\nclass Invalid_params(Exception):\n pass\n\n\nclass UserAbort(Exception):\n pass\n\n\n##########################\n# Helper functions #\n##########################\n\n\ndef _get_branch_name(path):\n # return the name of the current branch of repo :path\n # _repos expects multiple path entries in an itterable\n # giving one in a list\n repo_generator = _repos([path])\n repo = list(repo_generator)[0]\n return repo.active_branch.name\n\n\n@call_from_shell\ndef git_branch_version(*args):\n (path,) = args\n print(_get_branch_name(path))\n\n\ndef _check_file_exists(path):\n # returns True if the file :path exists, False otherwize\n try:\n with open(path) as f:\n return True\n except IOError:\n return False\n\n\ndef sh_run(cmd, **kwargs):\n # wrapper for subprocess.run\n if \"stdout\" not in kwargs.keys():\n kwargs[\"stdout\"] = subprocess.PIPE\n if \"|\" not in cmd:\n cmd = cmd.split()\n return subprocess.run(cmd, **kwargs).stdout.decode(\"utf-8\")\n else:\n process = subprocess.Popen(cmd, shell=True, **kwargs)\n return process.communicate()[0].decode(\"utf-8\")\n\n\n@call_from_shell\ndef clear_pyc(*args):\n # remove compiled python files from the main source folder\n sh_run(f\"find {env.SRC} -type d -name __pycache__ | xargs rm -rf\")\n sh_run(f\"find {env.SRC} -name '*.pyc' -delete\")\n if args and args[0] == \"--all\":\n sh_run(f\"find {env.SRC_MULTI} -type d -name __pycache__ | xargs rm -rf\")\n sh_run(f\"find {env.SRC_MULTI} -name '*.pyc' -delete\")\n\n\ndef psql(dbname, query):\n # execute an sql query on a given database\n with connect(f\"dbname='{dbname}'\") as conn, conn.cursor() as cr:\n cr.execute(query)\n try:\n return cr.fetchall()\n except ProgrammingError:\n # printing a tactical dot to know that we went through here at least\n print(\".\")\n return []\n\n\n#####################################################################################\n# Put \"main\" functions bellow this bloc #\n# The params of functions callable from the shell are positional, and string only #\n#####################################################################################\n\n\ndef _so_checker(*args):\n # check that the params given to 'so' are correct,\n # check that I am not trying to start a protected DB,\n # check that I am sure to want to start a DB with the wrong branch checked out (only check $ODOO)\n\n if len(args) == 0:\n raise Invalid_params(\n _dd(\n \"\"\"\\\n At least give me a name :(\n so dbname [port] [other_parameters]\n note: port is mandatory if you want to add other parameters\"\"\"\n )\n )\n db_name = args[0]\n if db_name.startswith(\"CLEAN_ODOO\"):\n raise Invalid_params(\n _dd(\n f\"\"\"\\\n Don't play with that one!\n {db_name} is a protected database.\"\"\"\n )\n )\n try:\n db_version = _get_version_from_db(db_name)\n except OperationalError:\n # db doesn't exist.\n pass\n else:\n checked_out_branch = _get_branch_name(env.ODOO)\n if db_version != checked_out_branch:\n print(\n _dd(\n f\"\"\"\\\n Version mismatch\n DB version is: {db_version}\n repo version is: {checked_out_branch}\"\"\"\n )\n )\n ans = input(\"continue anyway? 
(y/N):\").lower()\n if ans == \"y\":\n print(\"I hope you know what you're doing...\")\n else:\n raise UserAbort(\"Yeah, that's probably safer :D\")\n if len(args) >= 2:\n try:\n int(args[1])\n except ValueError as ve:\n bad_port = str(ve).split(\":\")[1][2:-1]\n raise Invalid_params(\n f\"\"\"The port number must be an integer. Provided value : {bad_port}\"\"\"\n )\n\n\n@call_from_shell\ndef _so_builder(*args):\n # build the command to start odoo\n db_name = args[0]\n if len(args) < 2:\n cmd = _so_builder(db_name, 8069)\n return cmd\n port_number = args[1]\n ODOO_BIN_PATH = f\"{env.ODOO}/odoo-bin\"\n ODOO_PY_PATH = f\"{env.ODOO}/odoo.py\"\n PATH_COMMUNITY = f\"--addons-path={env.ODOO}/addons\"\n PATH_ENTERPRISE = (\n f\"--addons-path={env.ENTERPRISE},{env.ODOO}/addons,{env.SRC}/design-themes\"\n )\n PARAMS_NORMAL = f\"--db-filter=^{db_name}$ -d {db_name} --xmlrpc-port={port_number}\"\n additional_params = \" \".join(args[2:])\n if _check_file_exists(ODOO_BIN_PATH):\n # version 10 or above\n cmd = f\"{ODOO_BIN_PATH} {PATH_ENTERPRISE} {PARAMS_NORMAL} {additional_params}\"\n else:\n # version 9 or below\n try:\n version = _get_version_from_db(db_name)\n except OperationalError as e:\n msg = f\"\"\"{e}\n Note:\n `so` does not work with DBs < 10.0, unless it already exists\n This will probably never be fixed.\"\"\"\n raise Invalid_params(msg)\n if version == \"8.0\":\n cmd = f\"{ODOO_PY_PATH} {PATH_COMMUNITY} {PARAMS_NORMAL} {additional_params}\"\n else:\n cmd = (\n f\"{ODOO_PY_PATH} {PATH_ENTERPRISE} {PARAMS_NORMAL} {additional_params}\"\n )\n print(cmd)\n return cmd\n\n\n@call_from_shell\ndef so(*args):\n # start an odoo db\n if len(args) and args[0] == \"--help\":\n so(\"fakeDBname\", 678, \"--help\")\n # fakeDBname & 678 don't mean anything here\n return\n _so_checker(*args)\n cmd = _so_builder(*args)\n sh_run(cmd)\n\n\ndef _soiu(mode, *args):\n assert mode in (\"install\", \"upgrade\")\n mode = \"-i\" if mode == \"install\" else \"-u\"\n dbname, *apps = args\n assert apps, \"No apps list provided\"\n apps = \",\".join(apps)\n so(dbname, 1234, mode, apps, \"--stop-after-init\")\n\n\n@call_from_shell\ndef soi(*args):\n # install modules args[1:] on DB args[0]\n _soiu(\"install\", *args)\n\n\n@call_from_shell\ndef sou(*args):\n # upgrade modules args[1:] on DB args[0]\n _soiu(\"upgrade\", *args)\n\n\n# start python scripts with the vscode python debugger\n# note that the debbuger is on the called script,\n# if that script calls another one, that one is not \"debugged\"\n# so it doesn't work with oe-support.\n# doesn't work with alias calling python scripts\n@call_from_shell\ndef ptvsd2(*args):\n cmd = \"python2 -m ptvsd --host localhost --port 5678\".split() + list(args)\n subprocess.run(cmd)\n\n\n@call_from_shell\ndef ptvsd3(*args):\n cmd = \"python3 -m ptvsd --host localhost --port 5678\".split() + list(args)\n subprocess.run(cmd)\n\n\ndef _ptvsd_so(python_version, *args):\n args = list(args) + [\"--limit-time-real=1000\", \"--limit-time-cpu=600\"]\n _so_checker(*args)\n cmd = _so_builder(*args)\n cmd = cmd.split()\n if python_version == 3:\n ptvsd3(*cmd)\n else:\n ptvsd2(*cmd)\n\n\n@call_from_shell\ndef ptvsd2_so(*args):\n _ptvsd_so(2, *args)\n\n\n@call_from_shell\ndef ptvsd3_so(*args):\n _ptvsd_so(3, *args)\n\n\n@shell_end_hook\n@call_from_shell\ndef go(*args):\n # switch branch for all odoo repos\n print(\"cleaning all the junk\")\n clear_pyc()\n params = {\"checkout\": True, \"\": args}\n _git_odoo_app(**params)\n if len(args) == 1:\n differed_sh_run(f\"go_venv {args[0]}\")\n 
print(\"-----------\")\n differed_sh_run(\"golist\")\n\n\n@shell_end_hook\n@call_from_shell\ndef go_update_and_clean(*args):\n # git pull on all the repos of the main source folder (except for support-tools)\n version = args[0] if args else None\n params = {\"pull\": True, \"--version\": version}\n _git_odoo_app(**params)\n clear_pyc()\n differed_sh_run(\"go_venv_current\")\n differed_sh_run(\"echo '--------'\")\n differed_sh_run(\"golist\")\n\n\n@shell_end_hook\n@call_from_shell\ndef godb(*args):\n # switch repos branch to the version of the given DB\n db_name = args[0]\n try:\n version = _get_version_from_db(db_name)\n except OperationalError:\n print(f\"DB {db_name} does not exist\")\n else:\n params = {\"checkout\": True, \"--dbname\": db_name}\n _git_odoo_app(**params)\n differed_sh_run(f\"go_venv {version}\")\n\n\n@shell_end_hook\n@call_from_shell\ndef goso(*args):\n # switch repos to the version of given db and starts it\n db_name = args[0]\n godb(db_name)\n so(*args)\n\n\n@shell_end_hook\n@call_from_shell\ndef dropodoo(*args):\n \"\"\"drop the given DBs and remove its filestore,\n also removes it from meta if it was a local saas db\"\"\"\n import appdirs\n from shutil import rmtree\n\n if not args:\n raise Invalid_params(\n \"\"\"\\\n Requires the name(s) of the DB(s) to drop\n dropodoo \"\"\"\n )\n protection_file = f\"{env.AP}/drop_protected_dbs.txt\"\n with open(protection_file, \"r\") as f:\n drop_protected_dbs = [db.strip() for db in f]\n for db in args:\n if db in drop_protected_dbs:\n raise Invalid_params(\n f\"\"\"\\\n DB {db} is drop protected --> aborting\n To override protection, modify the protection file at {protection_file}\"\"\"\n )\n # remove from meta\n psql(\"meta\", f\"DELETE FROM databases WHERE name = '{db}'\")\n # dropping\n if db.startswith(\"oe_support_\"):\n print(f\"Dropping the DB {db} using oe-support\")\n differed_sh_run(f\"oes cleanup {db[11:]}\")\n else:\n psql(\n \"postgres\",\n f\"SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = '{db}'\",\n )\n sh_run(f\"dropdb {db}\")\n FS_DIR = os.path.join(appdirs.user_data_dir(\"Odoo\"), \"filestore\")\n filestore_path = os.path.expanduser(os.path.join(FS_DIR, db))\n rmtree(filestore_path)\n\n\n@call_from_shell\ndef go_fetch(*args):\n # git fetch on all the repos of the main source folder\n _git_odoo_app(fetch=True)\n\n\n# vvvvvv not strictly odoo vvvvvvv\n\n\n@call_from_shell\ndef shurl(*args):\n \"\"\"returns (and prints) a short (and tracked) url version of a link\n hosted on an odoo saas server\"\"\"\n import xmlrpc.client\n from functools import partial\n\n api_key = env.SHORT_URL_KEY\n api_login = env.SHORT_URL_LOGIN\n assert all((api_key, api_login))\n long_url = args[0]\n dburl = \"https://short-url.moens.xyz\"\n db = \"noapp\"\n # connect to https://short-url.moens.xyz/ create a link.tracker with args[0] as the url field\n # the get short_url field from the newly created record\n common = xmlrpc.client.ServerProxy(\"{}/xmlrpc/2/common\".format(dburl))\n models = xmlrpc.client.ServerProxy(\"{}/xmlrpc/2/object\".format(dburl))\n uid = common.authenticate(db, api_login, api_key, {})\n r_exec = partial(models.execute_kw, db, uid, api_key)\n data = {\"url\": long_url}\n url_id = r_exec(\"link.tracker\", \"create\", [data])\n short_url = r_exec(\n \"link.tracker\",\n \"search_read\",\n [[[\"id\", \"=\", url_id]]],\n {\"fields\": [\"short_url\"]},\n )[0][\"short_url\"]\n print(short_url)\n return short_url\n\n\n@shell_end_hook\n@call_from_shell\ndef dummy_command(*args):\n print(\"in 
python\")\n differed_sh_run(\"echo 'in shell'\")\n\n\n# ^^^^^^^^^^^ aliasable functions above this line ^^^^^^^^^\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n method_name = sys.argv[1]\n assert method_name in CALLABLE_FROM_SHELL\n method_params = sys.argv[2:]\n method_params = \", \".join(f\"'{param}'\" for param in method_params)\n try:\n eval(f\"{method_name}({method_params})\")\n except (Invalid_params, UserAbort) as nice_e:\n print(nice_e)\n else:\n print(\"Missing arguments, require at least the function name\")\n","sub_path":"python_scripts/odoo_alias.py","file_name":"odoo_alias.py","file_ext":"py","file_size_in_byte":13318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162835079","text":"import json\nimport runmanager.remote\nimport os\nimport time\nimport shutil\n\nremoteClient = runmanager.remote.Client()\nrecieved_json_folder = R'C:\\Users\\Rohit_Prasad_Bhatt\\Documents\\Django_labscript\\media\\uploads'\nexecuted_json_folder = R'C:\\Users\\Rohit_Prasad_Bhatt\\Documents\\Django_labscript\\media\\uploads\\executed'\njson_status_folder = R'C:\\Users\\Rohit_Prasad_Bhatt\\Documents\\Django_labscript\\media\\uploads\\status'\nexp_script_folder = R'C:\\Users\\Rohit_Prasad_Bhatt\\labscript-suite\\userlib\\labscriptlib\\example_apparatus'\n\ndef check_json_dict(json_dict):\n return True\n\ndef modify_shot_output_folder(job_id):\n defaut_shot_folder = str(remoteClient.get_shot_output_folder())\n modified_shot_folder = (defaut_shot_folder.rsplit('\\\\',1)[0])+'\\\\'+job_id\n remoteClient.set_shot_output_folder(modified_shot_folder)\n\ndef gen_script_and_globals(json_dict):\n globals_dict = {'user_id':json_dict['user_id'],'shots':json_dict['experiment_0']['shots']}\n remoteClient.set_globals(globals_dict)\n remoteClient.set_globals(globals_dict)\n script_name = 'Experiment_' + globals_dict['user_id'] + '.py'\n exp_script = os.path.join(exp_script_folder, script_name)\n ins_list = json_dict['experiment_0']['instructions']\n func_dict = {'rx':'rx','delay':'delay','measure':'measure'}\n header_path = R'C:\\Users\\Rohit_Prasad_Bhatt\\labscript-suite\\userlib\\labscriptlib\\example_apparatus\\header.py'\n code = ''\n try:\n with open(header_path, \"r\") as header_file:\n code=header_file.read()\n except:\n print('Something wrong. Does path file exists?')\n\n try:\n with open(exp_script, \"w\") as script_file:\n script_file.write(code)\n except:\n print('Something wrong. Does path file exists?')\n\n for i in range(len(ins_list)):\n inst = ins_list[i]\n func_name = func_dict[ins_list[i][0]]\n params = '('+str(ins_list[i][1:])[1:-1]+')'\n code = 'Experiment.'+func_name+params+'\\n'\n try:\n with open(exp_script, \"a\") as script_file:\n script_file.write(code)\n except:\n print('Something wrong. Does path file exists?')\n\n code = 'stop(Experiment.t+0.1)'\n try:\n with open(exp_script, \"a\") as script_file:\n script_file.write(code)\n except:\n print('Something wrong. Does path file exists?')\n remoteClient.set_labscript_file(exp_script) # CAUTION !! This command only selects the file. 
It does not generate it!\n return exp_script\n\nwhile True:\n time.sleep(3)\n files = list(fn for fn in next(os.walk(recieved_json_folder))[2])\n if not files:\n continue\n else:\n json_name = (sorted(files))[0]\n job_id = (json_name)[5:-5]\n recieved_json_path = os.path.join(recieved_json_folder, json_name)\n executed_json_path = os.path.join(executed_json_folder, json_name)\n status_file_name = 'status_'+job_id+'.txt'\n status_file_path = os.path.join(json_status_folder, status_file_name)\n with open(recieved_json_path) as file:\n data = json.load(file)\n json_is_fine = check_json_dict(data)\n if json_is_fine:\n with open(status_file_path, 'w') as status_file:\n status_msg = 'Passed json sanity check'\n status_file.write(status_msg)\n exp_script = gen_script_and_globals(data)\n remoteClient.reset_shot_output_folder()\n modify_shot_output_folder(job_id)\n remoteClient.engage()\n with open(status_file_path, 'w') as status_file:\n status_msg = 'Compilation done. Shots sent to BLACS'\n status_file.write(status_msg)\n shutil.move(recieved_json_path, executed_json_path)\n #os.remove(recieved_json_path)\n #os.remove(exp_script)\n else:\n with open(status_file_path, 'w') as status_file:\n status_msg = 'Failed json sanity check. File will be deleted'\n status_file.write(status_msg)\n os.remove(recieved_json_path)\n","sub_path":"Django_labscript/Spooler.py","file_name":"Spooler.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"445428117","text":"'''\nScript to run LAS model\n\nModified from LAS implementation by Sai Krishna Rallabandi (srallaba@andrew.cmu.edu)\n\nPeter Wu\npeterw1@andrew.cmu.edu\n'''\n\nimport argparse\nimport csv\nimport itertools\nimport numpy as np\nimport os\nimport sys\nimport time\nimport torch\n\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import PackedSequence\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nfrom model_utils import *\n\n\nclass SequenceShuffle(nn.Module):\n # Performs pooling for pBLSTM\n def forward(self, seq):\n assert isinstance(seq, PackedSequence)\n padded, lens = pad_packed_sequence(seq) # (L, BS, D)\n padded = padded.transpose(0, 1)\n if padded.size(1) % 2 > 0:\n padded = padded[:, :-1, :]\n padded = padded.contiguous()\n padded = padded.view(padded.size(0), padded.size(1) // 2, 2 * padded.size(2))\n padded = padded.transpose(0, 1)\n newlens = np.array(lens) // 2\n newseq = pack_padded_sequence(padded, newlens)\n return newseq\n\n\nclass AdvancedLSTM(nn.LSTM):\n '''\n Class for learning initial hidden states when using LSTMs\n '''\n def __init__(self, input_dim, output_dim, args, **kwargs):\n super(AdvancedLSTM, self).__init__(input_dim, output_dim, **kwargs)\n bi = 2 if self.bidirectional else 1\n self.h0 = Variable(torch.zeros((bi, 1, self.hidden_size), dtype=torch.float32))\n self.c0 = Variable(torch.zeros((bi, 1, self.hidden_size), dtype=torch.float32))\n if torch.cuda.is_available():\n self.h0 = self.h0.cuda(args.cuda)\n self.c0 = self.c0.cuda(args.cuda)\n\n def initial_state(self, n):\n return (\n self.h0.expand(-1, n, -1).contiguous(),\n self.c0.expand(-1, n, -1).contiguous()\n )\n\n def forward(self, x, hx=None):\n if hx is None:\n n = x.batch_sizes[0]\n hx = self.initial_state(n)\n return super(AdvancedLSTM, self).forward(x, hx=hx)\n\n\nclass pLSTM(AdvancedLSTM):\n # Pyramidal LSTM\n def __init__(self, *args, **kwargs):\n super(pLSTM, self).__init__(*args, **kwargs)\n 
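# SequenceShuffle halves the time dimension by concatenating adjacent frame pairs (pyramidal downsampling)\n        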
self.shuffle = SequenceShuffle()\n\n def forward(self, x, hx=None):\n return super(pLSTM, self).forward(self.shuffle(x), hx=hx)\n\nINPUT_DIM = 39\n\nclass EncoderModel(nn.Module):\n # Encodes utterances to produce keys and values\n def __init__(self, args):\n super(EncoderModel, self).__init__()\n self.rnns = nn.ModuleList()\n self.rnns.append(AdvancedLSTM(INPUT_DIM, args.encoder_dim, args, bidirectional=True))\n self.rnns.append(pLSTM(args.encoder_dim * 4, args.encoder_dim, args, bidirectional=True))\n self.rnns.append(pLSTM(args.encoder_dim * 4, args.encoder_dim, args, bidirectional=True))\n self.rnns.append(pLSTM(args.encoder_dim * 4, args.encoder_dim, args, bidirectional=True))\n self.key_projection = nn.Linear(args.encoder_dim * 2, args.key_dim)\n self.value_projection = nn.Linear(args.encoder_dim * 2, args.value_dim)\n self.cuda = args.cuda\n\n def forward(self, utterances, utterance_lengths):\n h = utterances\n\n # Sort and pack the inputs\n sorted_lengths, order = torch.sort(utterance_lengths, 0, descending=True)\n _, backorder = torch.sort(order, 0)\n h = h[:, order, :]\n h = pack_padded_sequence(h, sorted_lengths.data.cpu().numpy())\n\n # RNNs\n for rnn in self.rnns:\n h, _ = rnn(h)\n\n # Unpack and unsort the sequences\n h, output_lengths = pad_packed_sequence(h)\n h = h[:, backorder, :]\n output_lengths = torch.from_numpy(np.array(output_lengths))\n if backorder.data.is_cuda:\n output_lengths = output_lengths.cuda(self.cuda)\n output_lengths = output_lengths[backorder.data]\n\n # Apply key and value\n keys = self.key_projection(h)\n values = self.value_projection(h)\n\n return keys, values, output_lengths\n\n\ndef sample_gumbel(shape, eps=1e-10, out=None):\n \"\"\"\n Sample from Gumbel(0, 1)\n based on\n https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,\n (MIT license)\n \"\"\"\n U = out.resize_(shape).uniform_() if out is not None else torch.rand(shape)\n return - torch.log(eps - torch.log(U + eps))\n\n\ndef gumbel_argmax(logits, dim):\n # Draw from a multinomial distribution efficiently\n return torch.max(logits + sample_gumbel(logits.size(), out=logits.data.new()), dim)[1]\n\n\nclass AdvancedLSTMCell(nn.LSTMCell):\n # Extend LSTMCell to learn initial state\n def __init__(self, input_dim, output_dim, args, **kwargs):\n super(AdvancedLSTMCell, self).__init__(input_dim, output_dim, **kwargs)\n self.h0 = Variable(torch.zeros((1, self.hidden_size), dtype=torch.float32))\n self.c0 = Variable(torch.zeros((1, self.hidden_size), dtype=torch.float32))\n if torch.cuda.is_available():\n self.h0 = self.h0.cuda(args.cuda)\n self.c0 = self.c0.cuda(args.cuda)\n\n def initial_state(self, n):\n '''\n Return:\n (shape (1, self.hidden_size) tensor, shape (1, self.hidden_size) tensor)\n '''\n return (\n self.h0.expand(n, -1).contiguous(),\n self.c0.expand(n, -1).contiguous()\n )\n\n\ndef calculate_attention(keys, mask, queries):\n \"\"\"Attention calculation\n\n Based on https://arxiv.org/abs/1409.0473 \n\n Args:\n keys: linear transformation of encoder output,\n shape (N, L, dec_hidden_dim)\n mask: lengths, shape (N, L)\n queries: linear transformation of previous decoder hidden state,\n shape (N, dec_hidden_dim)\n \n Return:\n attn: attention, shape (N, L)\n \"\"\"\n energy = torch.bmm(keys, queries.unsqueeze(2)).squeeze(2) * mask # (B, T)\n energy = energy - (1 - mask) * 1e4 # subtract large number from padded region\n emax = torch.max(energy, 1)[0].unsqueeze(1) # (B, T)\n exp_e = torch.exp(energy - emax) * mask # (B, T)\n attn 
= exp_e / (exp_e.sum(1).unsqueeze(1)) # (B, T)\n return attn\n\n\ndef calculate_context(attn, values):\n \"\"\"Context calculation\n \n Args:\n attn: alpha's, shape (num_batches, seq_len)\n values: h's, shape (num_batches, seq_len, dec_hidden_dim)\n\n Return:\n ctx: context, shape (num_batches, dec_hidden_dim)\n \"\"\"\n ctx = torch.bmm(attn.unsqueeze(1), values).squeeze(1) # (N, value_dim)\n return ctx\n\n\nclass DecoderModel(nn.Module):\n # Speller/Decoder\n def __init__(self, args, vocab_size):\n super(DecoderModel, self).__init__()\n self.embedding = nn.Embedding(vocab_size + 1, args.decoder_dim)\n self.input_rnns = nn.ModuleList()\n self.input_rnns.append(AdvancedLSTMCell(args.decoder_dim + args.value_dim, args.decoder_dim, args))\n self.input_rnns.append(AdvancedLSTMCell(args.decoder_dim, args.decoder_dim, args))\n self.input_rnns.append(AdvancedLSTMCell(args.decoder_dim, args.decoder_dim, args))\n self.key_projection = nn.Linear(args.key_dim, args.decoder_dim)\n self.query_projection = nn.Linear(args.decoder_dim, args.decoder_dim)\n self.char_projection = nn.Sequential(\n nn.Linear(args.decoder_dim+args.value_dim, args.decoder_dim),\n nn.LeakyReLU(),\n nn.Linear(args.decoder_dim, vocab_size+1)\n )\n self.force_rate = args.teacher_force_rate\n self.char_projection[-1].weight = self.embedding.weight # weight tying\n self.cuda = args.cuda\n\n def forward_pass(self, input_t, keys, values, mask, ctx, input_states):\n # Embed the previous character\n embed = self.embedding(input_t)\n # Concatenate embedding and previous context\n ht = torch.cat((embed, ctx), dim=1)\n # Run first set of RNNs\n new_input_states = []\n for rnn, state in zip(self.input_rnns, input_states):\n ht, newstate = rnn(ht, state)\n new_input_states.append((ht, newstate))\n new_keys = self.key_projection(keys)\n queries = self.query_projection(ht)\n # Calculate attention\n attn = calculate_attention(keys=new_keys, mask=mask, queries=queries)\n # Calculate context\n ctx = calculate_context(attn=attn, values=values)\n # Concatenate hidden state and context\n ht = torch.cat((ht, ctx), dim=1)\n # Run projection\n logit = self.char_projection(ht)\n # Sample from logits\n generated = gumbel_argmax(logit, 1) # (N,)\n return logit, generated, ctx, attn, new_input_states\n\n def forward(self, inputs, input_lengths, keys, values, utterance_lengths, future=0):\n mask = Variable(output_mask(values.size(0), utterance_lengths).transpose(0, 1)).float()\n values = values.transpose(0, 1)\n keys = keys.transpose(0, 1)\n t = inputs.size(0)\n n = inputs.size(1)\n\n # Initial states\n input_states = [rnn.initial_state(n) for rnn in self.input_rnns]\n\n # Initial context\n h0 = input_states[-1][0]\n new_keys = self.key_projection(keys)\n queries = self.query_projection(h0)\n attn = calculate_attention(new_keys, mask, queries)\n ctx = calculate_context(attn, values)\n\n # Decoder loop\n logits = []\n attns = []\n generateds = []\n for i in range(t):\n # Use forced or generated inputs\n if len(generateds) > 0 and self.force_rate < 1 and self.training:\n input_forced = inputs[i]\n input_gen = generateds[-1]\n input_mask = Variable(input_forced.data.new(*input_forced.size()).bernoulli_(self.force_rate))\n input_t = (input_mask * input_forced) + ((1 - input_mask) * input_gen)\n else:\n input_t = inputs[i]\n # Run a single timestep\n logit, generated, ctx, attn, input_states = self.forward_pass(\n input_t=input_t, keys=keys, values=values, mask=mask, ctx=ctx,\n input_states=input_states\n )\n # Save outputs\n logits.append(logit)\n 
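# per-step attention weights are kept so Seq2SeqModel can expose them through _state_hooks\n            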
attns.append(attn)\n generateds.append(generated)\n\n # For future predictions\n if future > 0:\n assert len(generateds) > 0\n input_t = generateds[-1]\n for _ in range(future):\n # Run a single timestep\n logit, generated, ctx, attn, input_states = self.forward_pass(\n input_t=input_t, keys=keys, values=values, mask=mask, ctx=ctx,\n input_states=input_states\n )\n # Save outputs\n logits.append(logit)\n attns.append(attn)\n generateds.append(generated)\n # Pass generated as next x\n input_t = generated\n\n # Combine all the outputs\n logits = torch.stack(logits, dim=0) # (L, N, Vocab Size)\n attns = torch.stack(attns, dim=0) # (L, N, T)\n generateds = torch.stack(generateds,dim=0)\n return logits, attns, generateds\n\n\nclass Seq2SeqModel(nn.Module):\n # Tie encoder and decoder together\n def __init__(self, args, vocab_size):\n super(Seq2SeqModel, self).__init__()\n self.encoder = EncoderModel(args)\n self.decoder = DecoderModel(args, vocab_size=vocab_size)\n self._state_hooks = {}\n\n def forward(self, utterances, utterance_lengths, chars, char_lengths, future=0):\n keys, values, lengths = self.encoder(utterances, utterance_lengths)\n logits, attns, generated = self.decoder(chars, char_lengths, keys, values, lengths, future=future)\n self._state_hooks['attention'] = attns.permute(1, 0, 2).unsqueeze(1)\n return logits, generated, char_lengths\n\n\ndef write_transcripts(path, args, model, loader, charset, log_path):\n # Write CSV file\n model.eval()\n os.makedirs(os.path.dirname(os.path.abspath(path)), exist_ok=True)\n with open(path, 'w', newline='') as f:\n w = csv.writer(f)\n transcripts = generate_transcripts(args, model, loader, charset)\n for i, t in enumerate(transcripts):\n w.writerow([i+1, t])\n with open(log_path, 'a') as ouf:\n ouf.write('%s\\n' % t)\n if (i+1) % 100 == 0:\n print('Wrote %d Lines' % (i+1))\n return transcripts\n\n\nclass SequenceCrossEntropy(nn.CrossEntropyLoss):\n # Customized CrossEntropyLoss\n def __init__(self, *args, **kwargs):\n super(SequenceCrossEntropy, self).__init__(*args, reduce=False, **kwargs)\n\n def forward(self, prediction, target):\n logits, generated, sequence_lengths = prediction\n maxlen = logits.size(0)\n mask = Variable(output_mask(maxlen, sequence_lengths.data)).float()\n logits = logits * mask.unsqueeze(2)\n losses = super(SequenceCrossEntropy, self).forward(logits.view(-1, logits.size(2)), target.view(-1))\n loss = torch.sum(mask.view(-1) * losses) / logits.size(1)\n return loss\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--batch-size', type=int, default=32, metavar='N', help='batch size')\n parser.add_argument('--save-directory', type=str, default='output/baseline/v1', help='output directory')\n parser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs')\n parser.add_argument('--patience', type=int, default=10, help='patience for early stopping')\n parser.add_argument('--num-workers', type=int, default=2, metavar='N', help='number of workers')\n parser.add_argument('--cuda', type=int, default=0, help='CUDA device')\n\n parser.add_argument('--lr', type=float, default=1e-3, metavar='N', help='lr')\n parser.add_argument('--weight-decay', type=float, default=1e-5, metavar='N', help='weight decay')\n parser.add_argument('--teacher-force-rate', type=float, default=0.9, metavar='N', help='teacher forcing rate')\n\n parser.add_argument('--encoder-dim', type=int, default=256, metavar='N', help='hidden dimension')\n parser.add_argument('--decoder-dim', type=int, default=512, 
metavar='N', help='hidden dimension')\n parser.add_argument('--value-dim', type=int, default=128, metavar='N', help='hidden dimension')\n parser.add_argument('--key-dim', type=int, default=128, metavar='N', help='hidden dimension')\n parser.add_argument('--generator-length', type=int, default=250, metavar='N', help='maximum length to generate')\n\n parser.add_argument('--test-mode', type=str, default='transcript', help='Test mode: transcript, cer, perp')\n\n return parser.parse_args()\n\ndef main():\n args = parse_args()\n\n t0 = time.time()\n\n if not os.path.exists(args.save_directory):\n os.makedirs(args.save_directory)\n LOG_PATH = os.path.join(args.save_directory, 'log')\n with open(LOG_PATH, 'w+') as ouf:\n pass\n\n print(\"Loading File IDs and Y Data\")\n train_ids, train_ys = load_fid_and_y_data('train')\n dev_ids, dev_ys = load_fid_and_y_data('dev')\n test_ids, test_ys = load_fid_and_y_data('test')\n\n _, train_ys_miami = load_fid_and_y_data_miami('train')\n _, dev_ys_miami = load_fid_and_y_data_miami('dev')\n _, test_ys_miami = load_fid_and_y_data_miami('test')\n t1 = time.time()\n print_log('%.2f Seconds' % (t1-t0), LOG_PATH)\n\n print(\"Building Charset\")\n charset = build_charset(np.concatenate((train_ys_miami, dev_ys_miami, test_ys_miami), axis=0))\n charmap = make_charmap(charset) # {string: int}\n charcount = len(charset)\n t1 = time.time()\n print_log('%.2f Seconds' % (t1-t0), LOG_PATH)\n\n print(\"Mapping Characters\")\n trainchars = map_characters(train_ys, charmap) # list of 1-dim int np arrays\n devchars = map_characters(dev_ys, charmap) # list of 1-dim int np arrays\n t1 = time.time()\n print_log('%.2f Seconds' % (t1-t0), LOG_PATH)\n\n print(\"Building Loader\")\n dev_loader = make_loader(dev_ids, devchars, args, shuffle=True, batch_size=args.batch_size)\n train_loader = make_loader(train_ids, trainchars, args, shuffle=True, batch_size=args.batch_size)\n test_loader = make_loader(test_ids, None, args, shuffle=False, batch_size=args.batch_size)\n t1 = time.time()\n print_log('%.2f Seconds' % (t1-t0), LOG_PATH)\n\n print(\"Building Model\")\n model = Seq2SeqModel(args, vocab_size=charcount)\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n criterion = SequenceCrossEntropy()\n t1 = time.time()\n print_log('%.2f Seconds' % (t1-t0), LOG_PATH)\n\n print(\"Running\")\n CKPT_PATH = os.path.join(args.save_directory, 'model.ckpt')\n if os.path.exists(CKPT_PATH):\n model.load_state_dict(torch.load(CKPT_PATH))\n if torch.cuda.is_available():\n model = model.cuda(args.cuda)\n\n best_val_loss = sys.maxsize\n prev_best_epoch = 0\n for e in range(args.epochs):\n t1 = time.time()\n print_log('Starting Epoch %d (%.2f Seconds)' % (e+1, t1-t0), LOG_PATH)\n\n # train\n model.train()\n optimizer.zero_grad()\n l = 0\n tot_perp = 0\n for i, t in enumerate(train_loader):\n uarray, ulens, l1array, llens, l2array = t\n if torch.min(ulens).item() > 8 and torch.min(llens).item() > 0:\n uarray, ulens, l1array, llens, l2array = Variable(uarray), \\\n Variable(ulens), Variable(l1array), Variable(llens), Variable(l2array)\n if torch.cuda.is_available():\n uarray, ulens, l1array, llens, l2array = uarray.cuda(args.cuda), \\\n ulens.cuda(args.cuda), l1array.cuda(args.cuda), llens.cuda(args.cuda), l2array.cuda(args.cuda)\n prediction = model(uarray, ulens, l1array, llens)\n logits, generated, char_lengths = prediction\n loss = criterion(prediction, l2array)\n perp = perplexity(logits, l2array, char_lengths, device=args.cuda)\n l += loss.item()\n tot_perp += 
perp.item()\n                loss.backward()\n                torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25)\n                optimizer.step()\n                optimizer.zero_grad() # reset gradients so they do not accumulate across batches\n            if (i+1) % 100 == 0:\n                t1 = time.time()\n                print('Processed %d Batches (%.2f Seconds)' % (i+1, t1-t0))\n        print_log('Train Loss: %f' % (l/len(train_loader.dataset)), LOG_PATH)\n        print_log('Avg Train Perplexity: %f' % (tot_perp/len(train_loader.dataset)), LOG_PATH)\n        \n        # val\n        model.eval()\n        with torch.no_grad():\n            l = 0\n            tot_perp = 0\n            for i, t in enumerate(dev_loader):\n                uarray, ulens, l1array, llens, l2array = t\n                if torch.min(ulens).item() > 8 and torch.min(llens).item() > 0:\n                    uarray, ulens, l1array, llens, l2array = Variable(uarray), \\\n                        Variable(ulens), Variable(l1array), Variable(llens), Variable(l2array)\n                    if torch.cuda.is_available():\n                        uarray, ulens, l1array, llens, l2array = uarray.cuda(args.cuda), \\\n                            ulens.cuda(args.cuda), l1array.cuda(args.cuda), llens.cuda(args.cuda), l2array.cuda(args.cuda)\n                    prediction = model(uarray, ulens, l1array, llens)\n                    logits, generated, char_lengths = prediction\n                    loss = criterion(prediction, l2array)\n                    perp = perplexity(logits, l2array, char_lengths, device=args.cuda)\n                    l += loss.item()\n                    tot_perp += perp.item()\n            val_loss = l/len(dev_loader.dataset)\n            if val_loss < best_val_loss:\n                best_val_loss = val_loss\n                prev_best_epoch = e\n                torch.save(model.state_dict(), CKPT_PATH)\n                '''\n                write_transcripts(\n                    path=os.path.join(args.save_directory, 'submission.csv'),\n                    args=args, model=model, loader=test_loader, charset=charset)\n                '''\n            elif e - prev_best_epoch > args.patience:\n                break\n            print_log('Val Loss: %f' % val_loss, LOG_PATH)\n            print_log('Avg Val Perplexity: %f' % (tot_perp/len(dev_loader.dataset)), LOG_PATH)\n            cer_val = cer(args, model, dev_loader, charset, dev_ys, device=args.cuda)\n            print_log('CER: %f' % cer_val, LOG_PATH)\n    \n\nif __name__ == '__main__':\n    main()\n","sub_path":"fisher_eng/baseline_miami/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"8713768","text":"from random import randint\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data # TF1 tutorials helper; provides read_data_sets()\nimport matplotlib.pyplot as plt\nimport cv2\nimport pickle\nfrom tqdm import tqdm\nimport os\nimport warnings\nimport torch\nimport torchvision\n\nclass ImageDataset():\n    \"\"\"\n    Class for holding image datasets that are loaded in manually\n    Contains all batch preprocessing features as well\n    \"\"\"\n\n    train = None\n    x_train = None\n    y_train = None\n\n    test = None\n    x_test = None\n    y_test = None\n\n    validation = None\n    x_valid = None\n    y_valid = None\n\n    shape = None\n    num_classes = None\n    batch_size = 128\n    repeat_size = 5\n    shuffle = 128\n    validation_split = 0.2\n\n\n    def __init__(self, dir=None, type=None):\n        \"\"\"\n        Constructor for building the TensorFlow dataset\n\n        Loads the train, test, and validation pickle files from the specified directory and applies\n        normalization on the dataset, followed by a setup of either a tensorflow or pytorch batch iterator\n\n        :param str dir: the path to the dataset containing pickle files\n        :param str type: the default tensorflow dataset to load in (only supports MNIST)\n        \"\"\"\n\n        batch_iter_load_type = None\n\n        if type is not None:\n            batch_iter_load_type = type.split(\"_\")[0]\n            if type == 'TF_MNIST':\n                mnist = input_data.read_data_sets('data/MNIST/', one_hot=True)\n                self.x_train = mnist.train.images\n                self.y_train = mnist.train.labels\n                self.x_train = 
self.x_train.astype(np.float32)\n\n                self.shape = list(self.x_train.shape)\n                print(\"Shape: \", self.shape)\n                self.num_classes = self.y_train.shape[1]\n                print(\"Unique Classes: \", self.num_classes)\n\n                self.x_valid = mnist.validation.images\n                self.y_valid = mnist.validation.labels\n                self.x_valid = self.x_valid.astype(np.float32)\n\n                self.x_test = mnist.test.images\n                self.y_test = mnist.test.labels\n                self.x_test = self.x_test.astype(np.float32)\n\n                print(\"Preprocessing train data...\")\n                self.normalize_image_pixels(self.x_train)\n                print(\"Preprocessing test data...\")\n                self.normalize_image_pixels(self.x_test)\n                print(\"Preprocessing validation data...\")\n                self.normalize_image_pixels(self.x_valid)\n\n            elif type == \"TORCH_MNIST\":\n                self.train = torchvision.datasets.MNIST(\"./data\", transform=None, download=True, train=True)\n                self.x_train = self.train.data.numpy() / 255.0\n                self.y_train = self.train.targets.numpy()\n\n                self.shape = list(self.x_train.shape)\n                print(\"Shape: \", self.shape)\n                self.num_classes = len(set(self.y_train))\n                print(\"Unique Classes: \", self.num_classes)\n\n                self.test = torchvision.datasets.MNIST(\"./data\", transform=None, download=True, train=False)\n                self.x_test = self.test.data.numpy() / 255.0\n                self.y_test = self.test.targets.numpy()\n\n                # print(\"Preprocessing train and validation data...\")\n                # self.preprocess_normalize_only(self.x_train)\n                # print(\"Preprocessing test data...\")\n                # self.preprocess_normalize_only(self.x_test)\n\n                self.train = torch.utils.data.TensorDataset(torch.FloatTensor(self.x_train),\n                                                            torch.LongTensor(self.y_train))\n                self.test = torch.utils.data.TensorDataset(torch.FloatTensor(self.x_test),\n                                                           torch.LongTensor(self.y_test))\n                self.validation_size = int(self.validation_split * len(self.x_train))\n                self.train_size = len(self.x_train) - self.validation_size\n                self.train, self.validation = torch.utils.data.random_split(self.train, [self.train_size, self.validation_size])\n\n            else:\n                raise Exception(\"Loading this dataset is not currently supported by the batch iterator\")\n\n\n            if batch_iter_load_type == \"TF\":\n                self.setup_tf_batch_iterator(self.x_train, self.y_train)\n            elif batch_iter_load_type == \"TORCH\":\n                self.setup_torch_data_loaders()\n                self.x_valid, self.y_valid = [x[0] for x in next(iter(self.validation))] # first (image, label) pair of the first validation batch; next(iter(...)) replaces the py2-only .next()\n                self.x_valid = np.array(self.x_valid)\n                self.y_valid = np.array(self.y_valid)\n            else:\n                raise Exception(\"Module not supported for this batch iterator...\")\n\n        elif dir is not None:\n            batch_iter_load_type = dir.split(\"_\")[0]\n            self.load(dir)\n            print(\"Preprocessing train data...\")\n            self.normalize_image_pixels(self.x_train)\n            print(\"Preprocessing test data...\")\n            self.normalize_image_pixels(self.x_test)\n            print(\"Preprocessing validation data...\")\n            self.normalize_image_pixels(self.x_valid)\n\n            if batch_iter_load_type == \"TF\":\n                self.setup_tf_batch_iterator(self.x_train, self.y_train)\n            elif batch_iter_load_type == \"TORCH\":\n                self.setup_torch_data_loaders()\n            else:\n                raise Exception(\"Module not supported for this batch iterator...\")\n\n    def load(self, directory:str) -> None:\n        \"\"\"\n        Populates the train, test, and validation global variables with raw data from the pickled files\n\n        :param str directory: the path to the dataset containing pickle files\n        :return: None, the class variables are populated accordingly\n        \"\"\"\n\n        self.train = pickle.load(open(directory + 'train.p', 'rb'))\n        self.x_train, self.y_train = self.train['features'], self.train['labels']\n        self.x_train = 
self.x_train.astype(np.float32)\n\n self.shape = self.x_train[0].shape\n print(\"Shape: \", self.shape)\n self.num_classes = len(np.unique(self.y_train))\n print(\"Unique Classes: \", self.num_classes)\n\n self.test = pickle.load(open(directory + 'test.p', 'rb'))\n self.x_test, self.y_test = self.test['features'], self.test['labels']\n self.x_test = self.x_test.astype(np.float32)\n\n self.validation = pickle.load(open(directory + 'valid.p', 'rb'))\n self.x_valid, self.y_valid = self.validation['features'], self.validation['labels']\n self.x_valid = self.x_valid.astype(np.float32)\n\n def setup_torch_data_loaders(self):\n \"\"\"\n Constructs a PyTorch dataloader\n\n :return: None, the dataloader is constructed from tensors and stored in class variables\n \"\"\"\n self.train = torch.utils.data.DataLoader(self.train, batch_size=self.batch_size, shuffle=True)\n self.validation = torch.utils.data.DataLoader(self.validation, batch_size=self.batch_size)\n self.test = torch.utils.data.DataLoader(self.test, batch_size=self.batch_size)\n\n def setup_tf_batch_iterator(self, features:np.ndarray, labels:np.ndarray) -> None:\n \"\"\"\n Constructs a TensorFlow dataset from the features and labels and sets up the batch iterator\n\n :param np.ndarray features: the images of the dataset\n :param np.ndarray labels: the labels for each feature\n :return: None, the batch iterator is constructed from tensors and stored in class variables\n \"\"\"\n\n print(\"Setting up batch iterator...\")\n\n data = tf.data.Dataset.from_tensor_slices((features, labels))\n data = data.shuffle(len(self.y_train), reshuffle_each_iteration=True).batch(self.batch_size)\n\n iterator = tf.data.Iterator.from_structure(data.output_types, data.output_shapes)\n self.train_init = iterator.make_initializer(data)\n self.x_batch, self.y_batch = iterator.get_next()\n\n def display_one(self, a:np.ndarray, title1=\"Original\") -> None:\n \"\"\"\n Helper function for displaying an image\n\n :param np.ndarray a: the image to display\n :param str title1: the title of the image to display when plotting\n :return: None, the image is rendered and shown\n \"\"\"\n\n plt.imshow(a)\n plt.title(title1)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n def display_two(self, a:np.ndarray, b:np.ndarray, title1=\"Original\", title2=\"Edited\") -> None:\n \"\"\"\n Helper function for displaying two images, usually for comparing before and after transformations\n\n :param np.ndarray a: the \"before\" image to display (should be the original image before any preprocessing\n :param np.ndarray b: the \"after\" image to display (after applying any transformations)\n :param str title1: the title of the \"before\" image to display when plotting\n :param str title2: the title of the \"after\" image to display when plotting\n :return: None, both images are rendered and shown\n \"\"\"\n\n plt.subplot(121)\n plt.imshow(a)\n plt.title(title1)\n plt.xticks([])\n plt.yticks([])\n plt.subplot(122)\n plt.imshow(b)\n plt.title(title2)\n plt.xticks([])\n plt.yticks([])\n plt.show()\n\n def preprocess(self, features:np.ndarray) -> np.ndarray:\n \"\"\"\n Main function for preprocessing images\n\n :param np.ndarray features: the batch of images to perform preprocessing on\n :return: np.ndarray the modified batch of features\n \"\"\"\n\n for i, img in (enumerate(features)):\n img = self.preprocess_improved(img)\n features[i] = img\n return features\n\n def preprocess_improved(self, image:np.ndarray) -> np.ndarray:\n \"\"\"\n Main function for preprocessing images\n\n :param 
np.ndarray image: the image to apply a random transformation on\n :return: np.ndarray image the modified image\n \"\"\"\n\n choice = randint(0, 3)\n if choice == 0:\n image = image\n elif choice == 1:\n image = self.perform_hist_eq(image)\n elif choice == 2:\n image = self.translate(image)\n elif choice == 3:\n image = self.gaussian(image)\n\n return self.normalize_image_pixels(image)\n\n def preprocess_normalize_only(self, features:np.ndarray) -> np.ndarray:\n \"\"\"\n Main function for normalizing images only\n :param np.ndarray features: the batch of images to perform preprocessing on\n :return: np.ndarray the modified batch of features\n \"\"\"\n\n for i, img in (enumerate(features)):\n img = self.normalize_image_pixels(img)\n features[i] = img\n\n return features\n\n def perform_hist_eq(self, image: np.ndarray) -> np.ndarray:\n \"\"\"\n Takes in an image and performs histogram equalization -> improves contrast\n\n :param np.ndarray image: the image to apply histogram equalization on\n :return: np.ndarray image the modified image\n \"\"\"\n\n R, G, B = cv2.split(image.astype(np.uint8))\n\n img_r = cv2.equalizeHist(R)\n img_g = cv2.equalizeHist(G)\n img_b = cv2.equalizeHist(B)\n\n image = cv2.merge((img_r, img_g, img_b))\n\n return image.astype(np.float32)\n\n def translate(self, image, height=32, width=32, max_trans=5) -> np.ndarray:\n \"\"\"\n Applies a random translation in height and/or width\n\n :param np.ndarray image: the image to apply random translation on\n :param int height: the height of the image\n :param int width: the width of the image\n :param int max_trans: a max value to shift the height and width by\n :return: np.ndarray image the modified image\n \"\"\"\n\n translate_x = max_trans * np.random.uniform() - max_trans / 2\n translate_y = max_trans * np.random.uniform() - max_trans / 2\n translation_mat = np.float32([[1, 0, translate_x], [0, 1, translate_y]])\n trans = cv2.warpAffine(image, translation_mat, (height, width))\n return trans\n\n def gaussian(self, image, ksize=(11, 11), border=0) -> np.ndarray:\n \"\"\"\n Applies Gaussian Blur to the image\n\n :param np.ndarray image: the image to apply Gaussian Blur on\n :param tuple ksize: a tuple of 2 ints that describes the kernel size\n :param int border: value to apply a border on the image\n :return: np.ndarray image the modified image\n \"\"\"\n\n return cv2.GaussianBlur(image, ksize, border)\n\n def normalize_image_pixels(self, image:np.ndarray) -> np.ndarray:\n \"\"\"\n Function to normalize the image pixels. 
Assumes that the np.ndarray passed in contains values\n from [0,255] and normalizes it down to a value that is [0, 1)\n\n Revised to preprocess based on zero mean/unit variance, old code commented out\n\n :param np.ndarray image: the image to apply normalization on\n :return: np.ndarray image the modified image\n \"\"\"\n\n # for normalizing pixels\n return np.divide(image, 255.0)\n\n # for converting images to zero mean and unit variance\n # formula: z-score = x - mean / std\n # return (image - image.mean()) / image.std()\n # return np.divide(np.subtract(image, np.mean(image)), np.std(image))\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Main method for testing the data loader\n \"\"\"\n\n # Ignore warnings\n warnings.filterwarnings(\"ignore\")\n tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n # # Testing GTSRB\n # print(\"Testing GTSRB...\")\n # path = \"GTSRB/\"\n # gtsrb = ImageDataset(path)\n # n_train = len(gtsrb.x_train)\n # n_valid = len(gtsrb.x_valid)\n # n_test = len(gtsrb.x_test)\n # image_shape = gtsrb.shape\n # n_classes = gtsrb.num_classes\n #\n # print(\"Number of training examples =\", n_train)\n # print(\"Number of testing examples =\", n_test)\n # print(\"Number of validation examples =\", n_valid)\n # print(\"Image data shape =\", image_shape)\n # print(\"Number of classes =\", n_classes)\n #\n # print()\n\n # Testing MNIST\n print(\"Testing MNIST...\")\n mnist = ImageDataset(type=\"TF_MNIST\")\n n_train = len(mnist.x_train)\n n_valid = len(mnist.x_valid)\n n_test = len(mnist.x_test)\n image_shape = mnist.shape\n n_classes = mnist.num_classes\n\n print(\"Number of training examples =\", n_train)\n print(\"Number of testing examples =\", n_test)\n print(\"Number of validation examples =\", n_valid)\n print(\"Image data shape =\", image_shape)\n print(\"Number of classes =\", n_classes)\n\n print()\n\n # Testing PyTorch MNIST\n print(\"Testing PyTorch MNIST...\")\n mnist = ImageDataset(type=\"TORCH_MNIST\")\n n_train = len(mnist.train.dataset)\n n_valid = len(mnist.validation.dataset)\n n_test = len(mnist.test.dataset)\n image_shape = mnist.shape\n n_classes = mnist.num_classes\n\n print(\"Number of training examples =\", n_train)\n print(\"Number of testing examples =\", n_test)\n print(\"Number of validation examples =\", n_valid)\n print(\"Image data shape =\", image_shape)\n print(\"Number of classes =\", n_classes)","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":15300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"42369919","text":"'''\nStack1e - The Ultimate Stacking Game\n'''\n\nimport pygame\nimport os\nfrom pygame.locals import *\nfrom math import sin\n\nboardsize = boardwidth, boardheight = 12, 20\nscreensize = screenwidth, screenheight = 240, 400\ntilewidth = screenwidth/boardwidth\ntileheight = screenheight/boardheight\ntilesize = tilewidth, tileheight\n\ntilecolor = (232, 130, 6)\ntilecolor2 = (6, 232, 134)\n\nblack = (0,0,0)\n\nlevelspd = (80, 80, 75, 75, 70, 70, 65, 65, 60, 55, 50, 45, 40, 35, 30)\nmaxwidth = (3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)\n\ncolorchangey = 10\nwinlevel = 15\n\ncurrent_speed = 50 #in milliseconds\nboard = []\nlose_tiles = []\ncurrent_direction = 1\ncurrent_x, current_y, current_width = 0, boardheight - 1, 3\ncurrent_level = 0\n\nintro = 0\nplaying = 1\nlose = 2\nwin = 3\n\ngame_state = intro\n\ndata_py = os.path.abspath(os.path.dirname(__file__))\ndata_dir = 
os.path.normpath(os.path.join(data_py, 'data'))\n\ndef filepath(filename):\n #Determine the path to a file in the data directory.\n return os.path.join(data_dir, filename)\n\ndef load_image(filename):\n return pygame.image.load(os.path.join(data_dir, filename))\n\nbg_images = (load_image(\"intro.png\"), load_image(\"game.png\"), load_image(\"lose.png\"), load_image(\"win.png\"))\n\nbg_images[win].set_colorkey(black)\nbg_images[lose].set_colorkey(black)\n\nrunning = True\npygame.init()\npygame.display.set_caption('Stack1e')\n\ndef main():\n\tglobal game_state, current_x, current_y, current_speed, running, current_width, current_level\n\n\n\tscreen = pygame.display.set_mode(screensize)\n\n\treset_game()\n\n\twhile(running):\n\t\tupdate_movement()\n\t\tupdate_board_info()\n\t\tupdate_screen(screen)\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trunning = False\n\t\t\telif event.type == KEYDOWN:\n\t\t\t\tif event.key == K_SPACE:\n\t\t\t\t\tkey_hit()\n\t\t\t\telif event.key == K_ESCAPE:\n\t\t\t\t\tif game_state == intro:\n\t\t\t\t\t\trunning = False\n\t\t\t\t\telse:\n\t\t\t\t\t\treset_game()\n\t\t\t\telif event.key == K_LCTRL: #for cheater scum\n\t\t\t\t\tcurrent_x -= 1\n\t\t\t\t\tif (current_x < 0): current_x = 0\n\t\t\t\t\tcurrent_width += 1\n\t\t\t\t\tif (current_width >= boardwidth): current_width = boardwidth - 1\n\n\tpygame.display.quit()\n\ndef reset_game():\n\tglobal game_state, current_x, current_y, current_speed, running, current_width, current_level, lose_tiles\n\n\tclear_board()\n\tlose_tiles = []\n\n\trunning = True\n\n\tgame_state = intro\n\n\tcurrent_x = 0\n\tcurrent_y = boardheight - 1\n\tcurrent_level = 0\n\tcurrent_speed = levelspd[current_level]\n\tcurrent_width = maxwidth[current_level]\n\ndef key_hit():\n\tglobal running, game_state, current_x, current_y, current_width, current_speed, current_level, lose_tiles\n\n\tif game_state == playing:\n\t\tif current_y < boardheight - 1:\n\t\t\tfor x in range(current_x, current_x + current_width):\n\t\t\t\tif board[x][current_y + 1] == 0: # Collision check\n\t\t\t\t\tcurrent_width -= 1 #Give one less block next time\n\t\t\t\t\tboard[x][current_y] = 0 # Remove extra blocks\n\t\t\t\t\tlose_tiles.append((x, current_y, pygame.time.get_ticks())) #Block falling animation\n\n\t\tcurrent_level += 1\n\t\tcheck_win_lose()\n\t\tcurrent_y -= 1\n\telif game_state == intro:\n\t\tgame_state = playing\n\telif (game_state == lose) or (game_state == win):\n\t\treset_game()\n\t\tgame_state = intro\n\telse:\n\t\trunning = False\n\ndef check_win_lose():\n\tglobal game_state, current_width, current_level, current_speed, running, tilecolor\n\n\tif current_width == 0:\n\t\tgame_state = lose\n\telif current_level == winlevel:\n\t\tcurrent_speed = 100\n\t\tgame_state = win\n\telse:\n\t\tcurrent_speed = levelspd[current_level]\n\t\tif current_width > maxwidth[current_level]:\n\t\t\tcurrent_width = maxwidth[current_level]\n\nlast_time = 0\ndef update_movement():\n\tglobal game_state, last_time, current_x, current_y, current_width, current_speed, current_direction\n\n\tcurrent_time = pygame.time.get_ticks()\n\tif (last_time + current_speed <= current_time):\n\t\tif game_state == playing:\n\t\t\tnew_x = current_x + current_direction\n\n\t\t\tif (new_x < 0) or (new_x + current_width > boardwidth):\n\t\t\t\tcurrent_direction = -current_direction\n\n\t\t\tcurrent_x += current_direction\n\n\t\tlast_time = current_time\n\ndef update_screen(screen):\n\tglobal game_state\n\n\tif game_state == 
playing:\n\t\tdraw_background(screen)\n\t\tdraw_board(screen)\n\telif game_state == intro:\n\t\tdraw_background(screen)\n\t\tpass\n\telif (game_state == lose) or (game_state == win):\n\t\tscreen.fill(black)\n\t\tdraw_board(screen)\n\t\tdraw_background(screen)\n\n\tpygame.display.flip()\n\ndef draw_background(screen):\n\tglobal game_state\n\tscreen.blit(bg_images[game_state], (0,0,screenwidth,screenheight),\t(0,0,screenwidth,screenheight))\n\n\ndef update_board_info():\n\tglobal game_state\n\n\tif game_state == playing:\n\t\tclear_row(current_y)\n\t\tfill_current_row()\n\ndef draw_board(screen):\n\tfor x in range(boardwidth):\n\t\tfor y in range(boardheight):\n\t\t\tif board[x][y] == 1:\n\t\t\t\tdraw_tile(screen, x, y)\n\n\tdraw_lose_tiles(screen)\n\ndef draw_tile(screen, x, y):\n\txoffset = 0 #Win animation\n\tcol = tilecolor\n\tif (y < colorchangey):\n\t\tcol = tilecolor2\n\n\tif game_state == win:\n\t\txoffset = sin(pygame.time.get_ticks() * 0.004 + y * 0.5) * (screenwidth / 4)\n\n\tpygame.draw.rect(screen, col, (x * tilewidth + xoffset, y * tileheight, tilewidth, tileheight))\n\tpygame.draw.rect(screen, black, (x * tilewidth + xoffset, y * tileheight, tilewidth, tileheight), 2)\n\n#Block falling animation\ndef draw_lose_tiles(screen):\n\tfor lt in lose_tiles:\n\t\tfalltime = (pygame.time.get_ticks() - lt[2]) * 0.008 #Time falling\n\t\tx = lt[0] * tilewidth\n\t\ty = lt[1] * tileheight + falltime * falltime\n\n\t\tcol = tilecolor\n\t\tif (lt[1] < colorchangey):\n\t\t\tcol = tilecolor2\n\n\t\tif (y > screenheight):\n\t\t\tlose_tiles.remove(lt)\n\t\telse:\n\t\t\tpygame.draw.rect(screen, col, (x+2, y+2, tilewidth-3, tileheight-3))\n\ndef clear_board():\n\tglobal board\n\n\tboard = []\n\tfor x in range(boardwidth):\n\t\tboard.append([])\n\t\tfor y in range (boardheight):\n\t\t\tboard[x].append(0)\n\ndef clear_row(y):\n\tfor x in range(boardwidth):\n\t\tboard[x][y] = 0\n\ndef fill_current_row():\n\tglobal current_x, current_y, current_width\n\tfor x in range(current_x, current_x + current_width):\n\t\tboard[x][current_y] = 1\nmain()\n","sub_path":"2019/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"443312157","text":"from __future__ import absolute_import\nfrom six import with_metaclass\n\nfrom keras.models import Sequential\nfrom keras.layers import recurrent\nfrom keras.layers import core\n\nfrom bulbea.learn.models import Supervised\n\nclass ANN(Supervised):\n pass\n\nclass RNNCell(object):\n RNN = recurrent.SimpleRNN\n GRU = recurrent.GRU\n LSTM = recurrent.LSTM\n\nclass RNN(ANN):\n def __init__(self, sizes,\n cell = RNNCell.LSTM,\n dropout = 0.2,\n activation = 'linear',\n loss = 'mse',\n optimizer = 'rmsprop',\n metrics = ['accuracy']): # JRD - added metrics=['accuracy']\n self.model = Sequential() \n self.model.add(cell(\n input_shape = (None,sizes[0]),\n units = sizes[1],\n return_sequences = True\n ))\n\n for i in range(2, len(sizes) - 1):\n self.model.add(cell(sizes[i], return_sequences = False))\n self.model.add(core.Dropout(dropout))\n\n self.model.add(core.Dense(units = sizes[-1]))\n self.model.add(core.Activation(activation))\n\n self.model.compile(loss = loss, optimizer = optimizer, metrics = metrics) # JRD - added metrics = metrics \n \n def fit(self, X, y, *args, **kwargs):\n \"\"\"Keras - Trains the model for a fixed number of epochs (iterations on a dataset).\n # Arguments\n x: Input data.\n y: Target data.\n # Returns\n A `History` object. 
Its `History.history` attribute is\n            a record of training loss values and metrics values\n            at successive epochs, as well as validation loss values\n            and validation metrics values (if applicable).\"\"\"\n        return self.model.fit(X, y, *args, **kwargs)\n\n    def predict(self, X):\n        \"\"\"Keras - Generates output predictions for the input samples.\n        # Arguments\n            X: Input data.\n        # Returns\n            Numpy array(s) of predictions.\"\"\"\n        return self.model.predict(X)\n\n    def evaluate(self, X, y):\n        \"\"\"Keras - Returns the loss value & metrics values for the model in test mode.\n        # Arguments\n            x: Input data.\n            y: Target data.\n        # Returns\n            Scalar test loss (if the model has a single output and no metrics)\n            or list of scalars (if the model has multiple outputs\n            and/or metrics). The attribute `model.metrics_names` will give you\n            the display labels for the scalar outputs.\"\"\"\n        return self.model.evaluate(X, y, batch_size=128)\n\n","sub_path":"bulbea/learn/models/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":2635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"182242009","text":"import logging\nimport os\n\nimport pytest\n\nimport runez\nfrom runez.base import stringified\nfrom runez.conftest import cli, isolated_log_setup, IsolatedLogSetup, logged, temp_folder\nfrom runez.context import CaptureOutput\nfrom runez.convert import short\nfrom runez.file import get_lines\nfrom runez.logsetup import LogManager\n\n\nLOG = logging.getLogger(__name__)\nrunez.date.DEFAULT_TIMEZONE = runez.date.UTC\nrunez.serialize.set_default_behavior(strict=False, extras=True)\n\n\n# This is here only to satisfy flake8, mentioning the imported fixtures so they're not declared \"unused\"\nassert all(s for s in [cli, isolated_log_setup, logged, temp_folder])\n\n\nclass TempLog(object):\n    def __init__(self, folder, tracked):\n        \"\"\"\n        :param str folder: Temp folder\n        :param runez.TrackedOutput tracked: Tracked output\n        \"\"\"\n        self.folder = folder\n        self.tracked = tracked\n        self.stdout = tracked.stdout\n        self.stderr = tracked.stderr\n\n    @property\n    def logfile(self):\n        if LogManager.file_handler:\n            return short(LogManager.file_handler.baseFilename)\n\n    def expect_logged(self, *expected):\n        assert self.logfile, \"Logging to a file was not setup\"\n        remaining = set(expected)\n        with open(LogManager.file_handler.baseFilename, \"rt\") as fh:\n            for line in fh:\n                found = [msg for msg in remaining if msg in line]\n                remaining.difference_update(found)\n        if remaining:\n            LOG.info(\"File contents:\")\n            LOG.info(\"\\n\".join(get_lines(LogManager.file_handler.baseFilename)))\n        assert not remaining\n\n    def clear(self):\n        self.tracked.clear()\n\n    def __repr__(self):\n        return stringified(self.tracked)\n\n    def __str__(self):\n        return self.folder\n\n    def __contains__(self, item):\n        return item in self.tracked\n\n    def __len__(self):\n        return len(self.tracked)\n\n\n@pytest.fixture\ndef temp_log():\n    with IsolatedLogSetup():\n        with CaptureOutput() as tracked:\n            yield TempLog(os.getcwd(), tracked)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"609714239","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python3.6\nimport requests\nfrom parsel import Selector\n\nclass GoogleMerchantFeed:\n    '''Feed for Google Merchant'''\n    #url = 
'https://marko.biz.ua/google_merchant_center.xml?hash_tag=63112d878cb232b7c77baadf6c68b955&product_ids=&group_ids=24728881&label_ids='\n    url = 'https://marko.biz.ua/google_merchant_center.xml?hash_tag=63112d878cb232b7c77baadf6c68b955&product_ids=&group_ids=&label_ids=1197755'\n    def __init__(self, gid, title, description, link, price, availability, image, mpn, brand):\n        self.gid = gid\n        self.title = title\n        self.description = description\n        self.link = link\n        self.price = price\n        self.availability = availability\n        self.image = image\n        self.mpn = mpn\n        self.brand = brand\n    \n    def __repr__(self):\n        return self.title\n    \n# Parse the data and save it into GoogleMerchantFeed objects\nr = requests.get(GoogleMerchantFeed.url)\nr.encoding = 'utf-8'\nr = r.text\nhub = []\nsel = Selector(text=r, type='xml')\nitems = sel.xpath('//item')\nfor item in items:\n    gid = item.xpath('./*[name()=\"g:id\"]/text()').get()\n    title = item.xpath('./*[name()=\"g:title\"]/text()').get()\n    description = item.xpath('./*[name()=\"g:description\"]/text()').get().split('Основные размеры:')[0]\n    link = item.xpath('./*[name()=\"g:link\"]/text()').get()\n    price = item.xpath('./*[name()=\"g:price\"]/text()').get()\n    availability = item.xpath('./*[name()=\"g:availability\"]/text()').get()\n    image = item.xpath('./*[name()=\"g:image_link\"]/text()').get()\n    brand = item.xpath('./*[name()=\"g:brand\"]/text()').get()\n    mpn = 'mpn'\n    hub.append(GoogleMerchantFeed(gid, title, description, link, price, availability, image, mpn, brand))\n\n# Write the data to a file as a Google Merchant RSS 2.0 feed\nmy_file = open('google.xml', 'w')\nmy_file.write('<?xml version=\"1.0\"?>\\n<rss version=\"2.0\" xmlns:g=\"http://base.google.com/ns/1.0\">\\n<channel>\\n<title>Интернет-магазин «Марко»</title>\\n<link>https://marko.biz.ua</link>\\n<description>RSS 2.0 product data feed</description>\\n')\nfor item in hub:\n    my_file.write('<item>\\n')\n    my_file.write('<g:id>'+item.gid+'</g:id>\\n')\n    my_file.write('<title>'+item.title+'</title>')\n    my_file.write('<description>'+item.description+'</description>\\n')\n    my_file.write('<link>'+item.link+'</link>\\n')\n    my_file.write('<g:image_link>'+item.image+'</g:image_link>\\n')\n    my_file.write('<g:condition>new</g:condition>\\n')\n    my_file.write('<g:availability>'+item.availability+'</g:availability>\\n')\n    my_file.write('<g:price>'+item.price+'</g:price>\\n')\n    my_file.write('<g:brand>'+item.brand+'</g:brand>\\n')\n    my_file.write('<g:mpn>'+item.mpn+'</g:mpn>\\n')\n    my_file.write('<g:product_type>Постельное белье</g:product_type>\\n')\n    my_file.write('<g:google_product_category>4171</g:google_product_category>\\n')\n    my_file.write('</item>\\n')\nmy_file.write('</channel>\\n</rss>')\nmy_file.close()\n","sub_path":"prom_merchant.py","file_name":"prom_merchant.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"627147531","text":"import numpy as np\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\nimport datetime as dt\nfrom flask import Flask, jsonify\n\n\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\")\nBase = automap_base()\nBase.prepare(engine, reflect=True)\n\nmeasurement = Base.classes.measurement\nstation = Base.classes.station\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef home_page():\n    \"\"\"List all available api routes.\"\"\"\n    return (\n        f\"Available Routes:<br/>
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/\"\n f\"/api/v1.0//\"\n f\"/api/v1.0/\"\n f\"/api/v1.0//\"\n )\n\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n session = Session(engine)\n prcp_results = session.query(measurement.date, measurement.prcp).all()\n session.close()\n \n all_dates = {date:prcp for date, prcp in prcp_results}\n \n precipitation = []\n for date, prcp in prcp_results:\n prcp_dict = {}\n prcp_dict[\"Date\"] = date\n prcp_dict[\"Precipitation\"] = prcp\n precipitation.append(prcp_dict)\n\n return jsonify([precipitation])\n\n\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n session = Session(engine)\n results = session.query(station.station).all()\n session.close()\n all_stations = list(np.ravel(results))\n\n return jsonify(all_stations)\n\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n session = Session(engine)\n tobs_results = session.query(measurement.date, measurement.tobs, measurement.prcp).\\\n filter(measurement.date >= '2016-08-23').\\\n filter(measurement.station =='USC00519281').\\\n order_by(measurement.date).all()\n session.close()\n \n tobs_tobs = [] \n for prcp, date, tobs in tobs_results:\n tobs_dict = {}\n tobs_dict[\"prcp\"] = prcp\n tobs_dict[\"date\"] = date\n tobs_dict[\"tobs\"] = tobs\n tobs_tobs.append(tobs_dict)\n\n return jsonify(tobs_tobs)\n\n@app.route(\"/api/v1.0/\")\ndef start_date(start):\n session = Session(engine)\n start_results = session.query(measurement.date, func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start).all()\n session.close()\n\n temperatures = list(np.ravel(start_results))\n \n return jsonify(temperatures)\n\n@app.route(\"/api/v1.0//\")\ndef startdate_enddate(start, end):\n session = Session(engine)\n start_end_results = session.query(measurement.date, func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start).\\\n filter(measurement.date <= end).all()\n session.close()\n\n start_end = list(np.ravel(start_end_results))\n\n return jsonify(start_end)\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"351174579","text":"from selenium import webdriver\nimport time\n\n\nbrowser=webdriver.Firefox()\nbrowser.get(\"https://twitter.com/\")\ntime.sleep(1)\n\ngiris_yap=browser.find_element_by_xpath(\"//*[@id='doc']/div/div[1]/div[1]/div[2]/div[2]/div/a[2]\")\n\ngiris_yap.click()\n\ntime.sleep(1)\n\nusername=browser.find_element_by_xpath(\"//*[@id='page-container']/div/div[1]/form/fieldset/div[1]/input\")\npassword=browser.find_element_by_xpath(\"//*[@id='page-container']/div/div[1]/form/fieldset/div[2]/input\")\naccount_giris_yap=browser.find_element_by_xpath(\"//*[@id='page-container']/div/div[1]/form/div[2]/button\")\n\nusername.send_keys(\"@oguz42kara\")\npassword.send_keys(\"cccgfreis1998\")\ntime.sleep(1)\naccount_giris_yap.click()\ntime.sleep(1)\nsearchArea=browser.find_element_by_xpath(\"//*[@id='search-query']\")\nsearchButton=browser.find_element_by_xpath(\"//*[@id='global-nav-search']/span/button\")\nsearchArea.send_keys(\"#yazılımayolver\")\ntime.sleep(1)\nsearchButton.click()\ntime.sleep(1)\n\n\nlenOfPage = browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\nmatch=False\nwhile(match==False):\n lastCount = lenOfPage\n 
time.sleep(5)\n lenOfPage = browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);var lenOfPage=document.body.scrollHeight;return lenOfPage;\")\n if lastCount == lenOfPage:\n match=True\n\n\nelements=browser.find_elements_by_css_selector(\".TweetTextSize.js-tweet-text.tweet-text\")\ntweets=[]\ntime.sleep(2)\ntweetCount=1\nfor element in elements:\n tweets.append(element.text)\nwith open (\"tweets.txt\",\"w\",encoding=\"UTF-8\") as file:\n\n for tweet in tweets:\n file.write(str(tweetCount)+\".\\n\"+tweet+\"\\n\")\n tweetCount +=1\n\nbrowser.close()\n\n\n\n\n\n\n","sub_path":"Selenium Python/Selenium ve Twitter/getAllTweets.py","file_name":"getAllTweets.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"339926806","text":"\"\"\"This file contains the default processes supported by this module.\n\"\"\"\n\nimport os\nfrom dotenv import dotenv_values\n\nfrom src.data_getter import DataGetter\nfrom src.utils import *\nfrom src.models import *\n\n\ndef get_and_save_songs(artist_uri: str, json_name: str, csv_name: str, non_inc_albs: List[str] = [], non_inc_songs: List[str] = []):\n \"\"\"Obtains songs from a specific artist and saves them as .json and .csv.\n\n Identifies the authoring artist by the artist_uri as obtained from Spotify. Does not include songs present in albums whose name matches one of the non_inc_albums entries. Does not include songs whose name matches one of the non_inc_songs entries. Saves the songs as .json into the file with the name json_name. Saves the songs as .csv into the file with the name csv_name. \n\n Args:\n artist_uri (str): the artist uri as obtained from Spotify.\n json_name (str): the file name specifying where to store the .json format of songs data.\n csv_name (str): the file name specifying where to store the .csv format of songs data.\n non_inc_albs (List[str], optional): The list of names of albums to exclude. Defaults to [].\n non_inc_songs (List[str], optional): The list of names of songs to exclude. Defaults to [].\n \"\"\"\n\n base_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n config = dotenv_values(os.path.join(base_dir, \".env\"))\n client_id = config.get(\"SPOTIPY_CLIENT_ID\")\n client_secret = config.get(\"SPOTIPY_CLIENT_SECRET\")\n\n data_getter = DataGetter(client_id, client_secret)\n songs = data_getter.get_songs_data(artist_uri, non_inc_albs, non_inc_songs)\n\n json_loc = os.path.join(base_dir, json_name)\n success = save_songs_as_json(songs, json_loc)\n\n if success:\n print(\"Songs data saved into the file `%s`.\" % json_loc)\n\n csv_loc = os.path.join(base_dir, csv_name)\n success = json_to_csv(json_loc, csv_loc)\n\n if success:\n print(\"Songs data transformed from `%s` to `%s`.\" % (json_loc, csv_loc))\n \n else:\n print(\"Songs data failed to transform into the .csv fromat.\")\n\n else:\n print(\"Saving was not successful.\")\n\n\ndef get_and_save_ITM_songs():\n \"\"\"Sets up all parameters as needed to obtain all studio songs by In This Moment. 
\n \"\"\"\n get_and_save_songs(\"spotify:artist:6tbLPxj1uQ6vsRQZI2YFCT\", \"database/itm_songs.json\", \"database/itm_songs.csv\", non_inc_albs=[\"Blood at the Orpheum (Live)\", \"Blood\"], non_inc_songs=[\"Interview (Bonus)\"])\n\n\nif __name__==\"__main__\":\n get_and_save_ITM_songs()\n","sub_path":"src/processes.py","file_name":"processes.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"378477460","text":"import pexpect\nfrom boardfarm.lib.common import retry_on_exception\nfrom boardfarm.lib.installers import apt_install\nfrom nested_lookup import nested_lookup\n\n\ndef add_dns_auth_record(dns, sipserver_name):\n '''\n To add a record and srv to the dns server\n\n Parameters:\n dns(object): device where the dns server is installed\n sipserver_name(string): name of the sipserver\n '''\n sip_domain = sipserver_name + \".boardfarm.com\"\n #removing the auth record lines if present\n rm_dns_auth_record(dns)\n dns.sendline('cat >> /etc/dnsmasq.conf << EOF')\n dns.sendline('auth-zone=%s' % sip_domain)\n dns.sendline('auth-soa=12345678,admin.%s' % sip_domain)\n dns.sendline('srv-host=_sip._tcp,%s,5060,20,10' % sip_domain)\n dns.sendline('srv-host=_sip._tcp,%s,5060,20,10' % sip_domain)\n dns.sendline('mx-host=%s' % sip_domain)\n dns.sendline('EOF')\n dns.expect(dns.prompt)\n dns.sendline('/etc/init.d/dnsmasq restart')\n dns.expect(dns.prompt)\n\n\ndef rm_dns_auth_record(dns):\n '''\n To remove A record and srv to the dns server\n\n Parameters:\n dns(object): device where the dns server is installed\n '''\n dns.sendline(\n 'sed \\'/auth-zone\\=/,/mx-host\\=/d\\' /etc/dnsmasq.conf > /etc/tmpfile.txt'\n )\n dns.expect(dns.prompt)\n dns.sendline('mv /etc/tmpfile.txt /etc/dnsmasq.conf')\n dns.expect(dns.prompt)\n dns.sendline('/etc/init.d/dnsmasq restart')\n dns.expect(dns.prompt)\n\n\ndef voice_devices_configure(voice_devices_list, sip_server):\n '''\n Initialize the Voice test setup\n\n Parameters:\n voice_devices_list(list of obj): list of voice devices\n sip_server(obj): sipserver device\n '''\n try:\n for voice_device in voice_devices_list:\n if hasattr(voice_device, \"profile\"):\n boot_list = nested_lookup(\n \"on_boot\", voice_device.profile.get(voice_device.name, {}))\n for profile_boot in boot_list:\n profile_boot()\n if 'softphone' in voice_device.name:\n voice_device.phone_config(\n sip_server.get_interface_ipaddr(sip_server.iface_dut))\n except Exception as e:\n sip_server.kill_asterisk()\n raise Exception(\n \"Unable to initialize Voice devices, failed due to the error : \",\n e)\n\n\ndef dns_setup_sipserver(sip_server, config):\n '''\n To setup dns with auth records\n\n Parameters:\n sip_server(obj): sipserver device\n '''\n try:\n if sip_server:\n sip_server.prefer_ipv4()\n sip_server.sendline('echo \"nameserver 8.8.8.8\" > /etc/resolv.conf')\n apt_install(sip_server, 'dnsmasq')\n sip_server.setup_dnsmasq(config)\n add_dns_auth_record(sip_server, sip_server.name)\n except Exception as e:\n raise Exception(\"Unable to initialize dns, failed due to the error : \",\n e)\n\n\ndef basic_call(sipcenter, caller, callee, board, sipserver_ip, dial_number,\n tcid):\n '''\n To make a basic call\n\n Parameters:\n sipcenter(object): sipcenter device\n caller(object): caller device\n callee(object): callee device\n sipserver_ip(string): sipserver_ip\n dial_number(string): number to be dialed\n\n Return:\n media_out(string): media output through which tones are validated\n '''\n #phone start\n 
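`retry_on_exception` (imported at the top of this module from boardfarm.lib.common) calls the given function with the given argument tuple and retries if it raises. A rough standalone sketch of that calling convention, assuming a hypothetical retry count and delay (boardfarm's actual defaults may differ):

import time

def retry_on_exception_sketch(func, args, retries=3, delay=1):
    # Call func(*args); on an exception, wait and retry, up to `retries` attempts.
    for attempt in range(retries):
        try:
            return func(*args)
        except Exception:
            if attempt == retries - 1:
                raise
            time.sleep(delay)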
retry_on_exception(caller.phone_start, ())\n retry_on_exception(callee.phone_start, ())\n #phone dial\n caller.dial(dial_number, sipserver_ip)\n #phone answer\n callee.answer()\n #board verify\n media_out = board.check_media_started(tcid)\n #call hangup\n board.expect(pexpect.TIMEOUT, timeout=20)\n board.send_sip_offhook_onhook(flag=\"onhook\", tcid=tcid)\n #phone kill\n caller.phone_kill()\n callee.phone_kill()\n return media_out\n","sub_path":"boardfarm/lib/voice.py","file_name":"voice.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"198239491","text":"from Event import Seminar\nfrom server import user_manager\nfrom User import User\n\n\ndef get_detailed_event_list(event_list, id):\n user = user_manager.get_user_by_id(id)\n deat_e_list = []\n for e in event_list:\n deat_e_list.append(e)\n if isinstance(e, Seminar):\n s_list = e.registered_sessions(user)\n for s in s_list:\n deat_e_list.append(s)\n\n return deat_e_list\n","sub_path":"printable_event_list.py","file_name":"printable_event_list.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"223871824","text":"import configparser\n\ncf=configparser.ConfigParser()\n\nprint(\"**********begin**********\\n\")\n\nprint(\"**********read**********\")\n''' read '''\ncf.read(\"test.cfg\")\nval=\"default\"\ntry:\n val=cf.get(\"default\", \"test1\")\n print(cf.items(\"main\"))\n print(cf.items(\"default\"))\n print(val)\nexcept Exception:\n pass\n \n\n''' write '''\nprint(\"**********write**********\")\ncf1=configparser.ConfigParser()\ncf1.add_section(\"section1\")\ncf1.set(\"section1\", \"key1\", \"1\")\ncf1.set(\"section1\", \"key2\", \"2\")\n\ncf1.add_section(\"section2\")\n#cf1.set(\"section2\", \"key2_1\", 1)\n\nwith open(\"testw.cfg\", \"w\") as f:\n cf1.write(f)\n\nprint(\"**********end**********\")","sub_path":"configfile/python/parseCfg.py","file_name":"parseCfg.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"96940756","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom scipy.stats import multivariate_normal\nimport matplotlib.pyplot as plt\nfrom random import randint\nimport pandas as pd\nimport numpy as np\nimport math\n\n\n# In[2]:\n\n\ndf_train = np.array(pd.read_table('./EMGaussian.data')).tolist()\ndf_test = np.array(pd.read_table('./EMGaussian.test')).tolist()\n\ndf_train_x_1 = [float(x[0].split(' ')[0]) for x in df_train]\ndf_train_x_2 = [float(x[0].split(' ')[1]) for x in df_train]\ndf_test_x_1 = [float(x[0].split(' ')[0]) for x in df_test]\ndf_test_x_2 = [float(x[0].split(' ')[1]) for x in df_test]\n\n\n# In[6]:\n\n\n#K-mean algorithm\nK = 4\nmu = [[0,0]]*K\nclasses = [[]]*K\ndistances = []\nn = len(df_train_x_1)\n\n#initialize the algorithm\nfor i in range(0,K):\n rand = randint(0,n-1)\n mu[i] = [df_train_x_1[rand],df_train_x_2[rand]]\n\nitermax = 50\nit = 0\n\n\n# In[7]:\n\n\nwhile it epsilon ):\n \n pi = [0]*K\n tau = tau_current\n \n # new mean called mu\n for j in range(0,K):\n tau_column = tau[:,j]\n sum_x1 = 0\n sum_x2 = 0\n for i in range(0,n-1): \n sum_x1 = sum_x1 + tau[i,j]*df_train_x_1[i]\n sum_x2 = sum_x2 + tau[i,j]*df_train_x_2[i]\n mu_star[j] = [sum_x1/tau_column.sum(), sum_x2/tau_column.sum()]\n pi[j] = tau_column.sum()\n \n # new pi (normalized)\n for j in range(0,K):\n pi_star[j] = (1/(np.sum(pi)))*pi[j]\n\n # covariances 
computations\n for j in range(0,K):\n tau_column = tau[:,j]\n sum = 0.0\n for i in range(0,n-1): \n sum = sum + tau[i,j]*( (df_train_x_1[i] - mu_star[j][0])**2 + (df_train_x_2[i] - mu_star[j][1])**2 )\n sigma[j] = np.sqrt(sum/(2*tau_column.sum()))\n\n # new tau matrix\n for i in range(0,n):\n # normalization factor computation\n sum = 0\n for j in range(0,K):\n sum = sum + pi_star[j]*(1/(sigma[j]**2))*np.exp(-0.5*(1/sigma[j]**2)*((df_train_x_1[i] - mu_star[j][0])**2 + (df_train_x_2[i] - mu_star[j][1])**2))\n # probabilities computations\n for j in range(0,K):\n tau[i,j] = (1/sum)*pi_star[j]*(1/(sigma[j]**2))*np.exp(-0.5*(1/sigma[j]**2)*((df_train_x_1[i] - mu_star[j][0])**2 + (df_train_x_2[i] - mu_star[j][1])**2))\n\n tau_next = tau \n \n \n \n\n\n# In[11]:\n\n\ntau_next\n\n\n# In[10]:\n\n\n### plot of data points + covariance matrix\nfor j in range(0,K):\n mu = np.array(mu_star[j])\n Sigma = np.array([[sigma[j]**2 , 0], [0, sigma[j]**2]])\n N = 60\n X = np.linspace(-10, 10, N)\n Y = np.linspace(-10, 10, N)\n X, Y = np.meshgrid(X, Y)\n pos = np.empty(X.shape + (2,))\n pos[:, :, 0] = X\n pos[:, :, 1] = Y\n F = multivariate_normal(mu, Sigma)\n Z = F.pdf(pos)\n plt.plot([elem[0] for elem in classes[j]],[elem[1] for elem in classes[j]],colors[j],markersize=2)\n plt.contour(X,Y,Z)\nplt.show()\n\n\n# In[18]:\n\n\n### with general covariance matrix\n\n# E-steps and M-steps iterations\n# means computation\nepsilon_general = 0.01\nmu_star_general = [[0,0]]*K\npi_star_general = [0]*K\nsigma_general = [None]*K\ntau_next_general, tau_current_general = np.zeros(shape=(n,K)), np.zeros(shape=(n,K))\ntau_current_general = tau_init\n\nwhile ( abs(norm2(tau_next_general) - norm2(tau_current_general)) > epsilon_general ):\n \n tau_general = tau_current_general\n \n pi_general = [0]*K\n \n # new mean called mu_general\n for j in range(0,K):\n tau_column_general = tau_general[:,j]\n sum_x1 = 0\n sum_x2 = 0\n for i in range(0,n-1): \n sum_x1 = sum_x1 + tau_general[i,j]*df_train_x_1[i]\n sum_x2 = sum_x2 + tau_general[i,j]*df_train_x_2[i]\n mu_star_general[j] = [sum_x1/tau_column_general.sum(), sum_x2/tau_column_general.sum()]\n pi_general[j] = tau_column_general.sum()\n \n # new pi_general (normalized)\n for j in range(0,K):\n pi_star_general[j] = (1/(np.sum(pi_general)))*pi_general[j]\n\n # covariances computations\n for j in range(0,K):\n tau_current_general = tau_general[:,j]\n sum = np.zeros(shape=(2,2))\n for i in range(0,n-1): \n arr = np.array([df_train_x_1[i], df_train_x_2[i]]) - np.array([mu_star_general[j][0],mu_star_general[j][1]])\n arr.shape = (2,1)\n sum = sum + tau_general[i,j]*arr.dot(arr.transpose())\n sigma_general[j] = (1/tau_current_general.sum())*sum\n\n # new tau_general matrix\n for i in range(0,n-1):\n # normalization factor computation\n sum = 0\n for j in range(0,K):\n arr = np.array([df_train_x_1[i], df_train_x_2[i]]) - np.array([mu_star_general[j][0],mu_star_general[j][1]])\n arr.shape = (1,2)\n mult = arr.dot(np.linalg.inv(sigma_general[j]))\n sum = sum + pi_star_general[j]*(1/np.sqrt(np.linalg.det(sigma_general[j])))*np.exp(-0.5*mult.dot(arr.transpose()))\n # probabilities computations\n for j in range(0,K):\n arr = np.array([df_train_x_1[i], df_train_x_2[i]]) - np.array([mu_star_general[j][0],mu_star_general[j][1]])\n arr.shape = (1,2)\n mult = arr.dot(np.linalg.inv(sigma_general[j]))\n tau_general[i,j] = (1/sum)*pi_star_general[j]*(1/np.sqrt(np.linalg.det(sigma_general[j])))*np.exp(-0.5*mult.dot(arr.transpose()))\n\n tau_next_general = tau_general \n\n\n\n# In[ ]:\n\n\n### plot of 
data points + covariance matrix\nfor j in range(0,K):\n    mu = np.array(mu_star_general[j])\n    Sigma = sigma_general[j]\n    N = 60\n    X = np.linspace(-10, 10, N)\n    Y = np.linspace(-10, 10, N)\n    X, Y = np.meshgrid(X, Y)\n    pos = np.empty(X.shape + (2,))\n    pos[:, :, 0] = X\n    pos[:, :, 1] = Y\n    F = multivariate_normal(mu, Sigma)\n    Z = F.pdf(pos)\n    plt.contour(X,Y,Z)\n    plt.plot([elem[0] for elem in classes[j]],[elem[1] for elem in classes[j]],colors[j],markersize=2)\nplt.show()\n\n\n# In[17]:\n\n\n# log-likelihood computations\ntau_next_general\n\n\n# In[ ]:\n\n\nnp.transpose(arr).shape\n\n","sub_path":"PGM_dm2.py","file_name":"PGM_dm2.py","file_ext":"py","file_size_in_byte":8072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"257492109","text":"#!/usr/bin/env python3.6\n# -*- coding=utf-8 -*-\n# This script was written by the current head instructor of Qianyitang, for the Qianyidun Python course!\n# Instructor QQ: 605658506\n# Qianyitang official site: www.qytang.com\n# The instructor's Tech Evolution course expands your technical frontiers\n# https://ke.qq.com/course/271956?tuin=24199d8a\n\nimport boto3\nimport os\nos.environ['AWS_DEFAULT_REGION'] = 'us-west-1'\n\ns3 = boto3.client('s3')\ns3.create_bucket(\n    Bucket='webapp-aws',\n    CreateBucketConfiguration={'LocationConstraint': os.environ['AWS_DEFAULT_REGION']}\n)\n","sub_path":"create_s3_bucket.py","file_name":"create_s3_bucket.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"62179098","text":"#!/usr/bin/python3\n\nimport argparse\nimport math\n\n\nHELP = '''Basic CLI calculator v0.2\nUsage: python calc2.py [OPTION]... arg1 arg2 ...\n'''\n\n\ndef floor_or_ceil(func):\n    # Apply optional floor/ceil rounding to the wrapped operation's result.\n    def wrapper(f, c, nums):\n        ans = func(f, c, nums)\n        if f:\n            return math.floor(ans)\n        if c:\n            return math.ceil(ans)\n        return ans\n    return wrapper\n\n\n@floor_or_ceil\ndef addition(f, c, nums):\n    return sum(nums)\n\n\n@floor_or_ceil\ndef subtraction(f, c, nums):\n    return nums[0] - nums[1]\n\n\n@floor_or_ceil\ndef multiplication(f, c, nums):\n    p = 1\n    for num in nums:\n        p *= num\n    return p\n\n\n@floor_or_ceil\ndef division(f, c, nums):\n    return nums[0] / nums[1]\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(prog=HELP)\n    group = parser.add_mutually_exclusive_group()\n\n    group.add_argument('-f', '--floor', action='store_true',\n                       help='Round down the result')\n    group.add_argument('-c', '--ceil', action='store_true',\n                       help='Round up the result')\n\n    parser.add_argument('numbers', type=float, nargs='*', metavar='N',\n                        help='Accumulator for the operands')\n    parser.add_argument('-a', '--add', action='store_true',\n                        help='Add all the operands')\n    parser.add_argument('-s', '--subtract', action='store_true',\n                        help='Subtract the second operand from the first.'\\\n                            ' Ignores other operands.
')\n parser.add_argument('-m', '--multiply', action='store_true',\n help='Multiply all the operands')\n parser.add_argument('-d', '--divide', action='store_true',\n help='Divide the first operand by the second.'\\\n ' Ignores other operands.')\n\n args = parser.parse_args()\n nums = args.numbers\n\n if args.add:\n print(addition(args.floor, args.ceil, nums))\n elif args.subtract:\n print(subtraction(args.floor, args.ceil, nums))\n elif args.multiply:\n print(multiplication(args.floor, args.ceil, nums))\n elif args.divide:\n print(division(args.floor, args.ceil, nums))\n else:\n print(nums)\n\n","sub_path":"basic_cli/calc2.py","file_name":"calc2.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"624088014","text":"import numpy as np\nimport scipy.interpolate as si\ndef read_experiment_data( filename ):\n \n file_handler = open( filename, \"r\" );\n \n data = np.genfromtxt(file_handler, skip_header=9, dtype=None, usecols=range(0,3)); #excluding the symtype col\n exp_bias = data[:,0]\n exp_current = data[:,1] \n \n truebias = np.linspace(-0.2, 0.2, int(exp_bias.shape[0]/2))\n \n truecurrent = si.griddata(exp_bias, exp_current, truebias, method='nearest')\n file_handler.close()\n \n return truebias, truecurrent\ndef read_experiment( filename ):\n exp_bias, exp_current = read_experiment_data(filename)\n \n dI = np.max(exp_current) - np.min(exp_current)\n dV = np.max(exp_bias) - np.min(exp_bias)\n \n exp_background = lambda V: dI/dV * V\n \n exp_current -= exp_background(exp_bias)\n \n return exp_bias, exp_current \ndef calculate_error( param_bias, param_current, param_exp ):\n error_func = 0.0\n param_bias = np.array( param_bias )\n param_current = np.array( param_current )\n param_exp = np.array( param_exp )\n \n if param_bias.shape[0] == param_current.shape[0] and param_current.shape[0] == param_exp.shape[0]:\n peak_current = param_current.max()\n peak_exp = param_exp.max()\n \n param_current /= peak_current\n param_exp /= peak_exp \n \n squares = np.square( param_exp - param_current)\n sum_least_squares = squares.sum()\n \n scaler = 1.0\n if peak_current > peak_exp:\n scaler = peak_current / peak_exp\n #print peak_current, peak_exp, scaler\n else:\n scaler = peak_exp / peak_current\n #print peak_current, peak_exp, scaler\n \n error_func = sum_least_squares\n #print scaler, error_func, sum_least_squares\n return scaler, error_func, error_func * scaler\n else:\n raise Exception(\"Calculate Error: Arguments should have the same shape.\")\n ","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"109274871","text":"for i in range(100,1000):\r\n if i == (i // 100) ** 3 + ((i % 100) // 10) ** 3 + (i % 10) **3:\r\n print(i)\r\n\r\n#答案:\r\n'''\r\nfor i in range(100, 1000):\r\n sum = 0\r\n temp = i\r\n while temp:\r\n sum = sum + (temp%10) ** 3\r\n temp //= 10 # 注意这里要使用地板除哦~\r\n if sum == i:\r\n print(i)\r\n'''\r\n\r\n#题目:\r\n'''\r\n编写一个程序,求 100~999 之间的所有水仙花数。\r\n如果一个 3 位数等于其各位数字的立方和,则称这个数为水仙花数。\r\n例如:153 = 1^3 + 5^3 + 3^3,因此 153 就是一个水仙花数\r\n'''\r\n","sub_path":"水仙花数.py","file_name":"水仙花数.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"641298791","text":"# encoding: UTF-8\n# Autor: Roberto Martínez Román\n# Muestra cómo utilizar pygame para escribir programas 
que dibujan en la pantalla\n\nimport pygame\n\n# Dimensiones de la pantalla\nANCHO = 800\nALTO = 600\n# Colores\nBLANCO = (255, 255, 255) # R,G,B en el rango [0,255]\nVERDE_BANDERA = (0, 122, 0)\nROJO = (255, 0, 0)\nAZUL = (0, 0, 255)\nNEGRO = (0, 0, 0)\n\n\n# Estructura básica de un programa que usa pygame para dibujar\n\n# Crea 60 enemigos y los agrega a la lista\ndef crearEnemigos(listaEnemigos, imgEnemigo):\n for renglon in range(1,6): # 1..5\n for columna in range(1,13): # 1..12\n enemigo = pygame.sprite.Sprite()\n enemigo.image = imgEnemigo\n enemigo.rect = imgEnemigo.get_rect()\n enemigo.rect.left = columna*58\n enemigo.rect.top = renglon*60\n listaEnemigos.append(enemigo)\n\n\n# Dibuja TODOS los enemigos sobre la ventana\ndef dibujarEnemigos(ventana, listaEnemigos):\n for enemigo in listaEnemigos:\n ventana.blit(enemigo.image, enemigo.rect)\n\n\ndef dibujarBalas(ventana, listaBalas):\n for bala in listaBalas:\n ventana.blit(bala.image, bala.rect)\n\n\ndef actualizarBalas(listaBalas):\n # MOVER\n for bala in listaBalas:\n bala.rect.top -= 20\n\n # BORRAR. No USAR iterador cuando borran datos de la lista\n for k in range(len(listaBalas)-1, -1, -1): # al revés\n bala = listaBalas[k]\n if bala.rect.top <= - bala.rect.height:\n listaBalas.remove(bala)\n\n\ndef checarColisiones(listaBalas, listaEnemigos, efecto):\n destruidos = 0\n for iB in range(len(listaBalas)-1, -1, -1):\n bala = listaBalas[iB]\n for iE in range(len(listaEnemigos)-1, -1, -1):\n enemigo = listaEnemigos[iE]\n xb, yb, ab, alb = bala.rect\n xe, ye, ae, ale = enemigo.rect\n if xb>=xe and xb<=xe+ae and yb>=ye and yb<=ye+ale:\n listaBalas.remove(bala)\n listaEnemigos.remove(enemigo)\n # Contarlo\n destruidos += 1\n # efecto de sonido\n efecto.play()\n break\n\n return destruidos\n\ndef dibujar():\n # Inicializa el motor de pygame\n pygame.init()\n ventana = pygame.display.set_mode((ANCHO, ALTO)) # Crea la ventana de dibujo\n reloj = pygame.time.Clock() # Para limitar los fps\n termina = False # Bandera para saber si termina la ejecución\n\n # Imágenes\n imgFondo = pygame.image.load(\"fondoMenu.jpg\")\n imgBtnJugar = pygame.image.load(\"jugar.png\")\n imgBtnAcercaDe = pygame.image.load(\"acercaDe.png\")\n\n spriteBtnJugar = pygame.sprite.Sprite()\n spriteBtnJugar.image = imgBtnJugar\n spriteBtnJugar.rect = imgBtnJugar.get_rect()\n spriteBtnJugar.rect.left = ANCHO//2 - spriteBtnJugar.rect.width//2\n spriteBtnJugar.rect.top = ALTO//3 - spriteBtnJugar.rect.height//2\n\n spriteBtnAcercaDe = pygame.sprite.Sprite()\n spriteBtnAcercaDe.image = imgBtnAcercaDe\n spriteBtnAcercaDe.rect = imgBtnAcercaDe.get_rect()\n spriteBtnAcercaDe.rect.left = ANCHO//2 - spriteBtnAcercaDe.rect.width//2\n spriteBtnAcercaDe.rect.top = 2*ALTO//3\n\n # ESTADOS del juego\n MENU = 1\n JUEGO = 2\n ACERCA_DE = 3\n GANA = 4\n estadoJuego = MENU # JUEGO, ACERCA_DE\n\n # ENEMIGOS\n imgEnemigo = pygame.image.load(\"enemigoAbajo.png\")\n listaEnemigos = []\n crearEnemigos(listaEnemigos, imgEnemigo)\n\n # Personaje. 
NAVE\n imgNave = pygame.image.load(\"nave.png\")\n nave = pygame.sprite.Sprite()\n nave.image = imgNave\n nave.rect = imgNave.get_rect()\n nave.rect.left = ANCHO//2\n nave.rect.top = ALTO - nave.rect.height\n\n # BALAS\n imgBala = pygame.image.load(\"bala.png\")\n listaBalas = [] # Al inicio no hay balas\n\n # SONIDO efecto al destruir un enemigo\n pygame.mixer.init()\n efectoDestruye = pygame.mixer.Sound(\"shoot.wav\")\n\n # Pantalla FIN (Gana)\n # Pantalla BLANCA, letrero GANAS...\n puntos = 0 # Naves destruidas\n fuente = pygame.font.SysFont(\"monospace\", 76)\n\n # Timer\n # Registra un evento, se activa cada 1000 milisegundos (un segundo en este caso)\n pygame.time.set_timer(pygame.USEREVENT, 1000)\n\n while not termina: # Ciclo principal\n # Procesa los eventos que recibe el programa\n for evento in pygame.event.get():\n if evento.type == pygame.USEREVENT:\n print(\"Timer\") # Este mensaje se imprime cada segundo, lo puedes ver en la consola\n if evento.type == pygame.QUIT: # El usuario hizo click en el botón de salir\n termina = True\n if evento.type == pygame.MOUSEBUTTONDOWN:\n xm, ym = pygame.mouse.get_pos()\n if estadoJuego == MENU:\n xbj, ybj, abj, albj = spriteBtnJugar.rect\n if xm>=xbj and xm<=xbj+abj:\n if ym>=ybj and ym<=ybj+albj:\n estadoJuego = JUEGO # Cambia de estado\n if evento.type == pygame.KEYDOWN and estadoJuego==JUEGO:\n if evento.key == pygame.K_LEFT:\n nave.rect.left -= 10\n elif evento.key == pygame.K_RIGHT:\n nave.rect.left += 10\n elif evento.key == pygame.K_SPACE:\n # dispara\n bala = pygame.sprite.Sprite()\n bala.image = imgBala\n bala.rect = imgBala.get_rect()\n bala.rect.left = nave.rect.left + nave.rect.width//2\n bala.rect.top = nave.rect.top\n listaBalas.append(bala)\n\n # Borrar pantalla\n ventana.fill(NEGRO)\n\n # Dibujar, aquí haces todos los trazos que requieras\n # Normalmente llamas a otra función y le pasas -ventana- como parámetro, por ejemplo, dibujarLineas(ventana)\n if estadoJuego == MENU:\n ventana.blit(imgFondo, (0,0))\n ventana.blit(spriteBtnJugar.image, spriteBtnJugar.rect)\n ventana.blit(spriteBtnAcercaDe.image, spriteBtnAcercaDe.rect)\n elif estadoJuego == JUEGO:\n dibujarEnemigos(ventana, listaEnemigos)\n dibujarBalas(ventana, listaBalas)\n ventana.blit(nave.image, nave.rect)\n # Actualizar\n actualizarBalas(listaBalas)\n # Verificar colisiones\n destruidos = checarColisiones(listaBalas, listaEnemigos, efectoDestruye)\n puntos += destruidos\n if puntos >= 3:\n estadoJuego = GANA # termina el juego, GANA\n elif estadoJuego == GANA:\n # Texto GANA\n texto = fuente.render(\"¡GANASTE!\",1,BLANCO)\n ventana.blit(texto, (ANCHO//2-200,ALTO//2))\n\n pygame.display.flip() # Actualiza trazos\n reloj.tick(40) # 40 fps\n\n # Después del ciclo principal\n pygame.quit() # termina pygame\n\n\ndef main():\n dibujar()\n\n\nmain()","sub_path":"Menu.py","file_name":"Menu.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"464120276","text":"\"\"\"\nGiven a sorted linked list, delete all duplicates such that each element appear\nonly once.\n\"\"\"\n\n# Definition for singly-linked list.\n# class ListNode():\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\ndef deleteDuplicates(self, head: ListNode) -> ListNode:\n\n if head.next is None:\n return head\n\n start = curr = head\n\n while curr is not None:\n if curr.next is None:\n break\n elif curr.val == curr.next.val:\n curr.next = curr.next.next\n elif curr.val != curr.next.val:\n curr = 
curr.next\n\n return start\n","sub_path":"leetcode/083_RemoveDuplicates.py","file_name":"083_RemoveDuplicates.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"521485544","text":"import pandas\nimport random\n\nclass Utility:\n\n @staticmethod\n def __getValueFromFile(directory, key):\n value = None\n\n with open(directory) as file:\n for line in file:\n if line.startswith(key):\n value = line[len(key) + 1:]\n\n break\n \n return str.strip(value)\n \n @staticmethod\n def getValueFromConfigurationFile(key):\n return Utility.__getValueFromFile(\".\\\\configuration.ini\", key)\n \n @staticmethod\n def getDataFrame(fileName):\n fileName = Utility.getValueFromConfigurationFile(\"data-directory\") + fileName\n dataFrame = None\n\n if fileName.endswith(\".xlsx\"):\n dataFrame = pandas.read_excel(fileName)\n elif fileName.endswith(\".csv\"):\n dataFrame = pandas.read_csv(fileName)\n \n return dataFrame\n \n @staticmethod\n def getData(scanFor, dataFrame):\n columnHeaders = list(dataFrame)\n\n if (scanFor in columnHeaders):\n return random.choice(dataFrame[columnHeaders[columnHeaders.index(scanFor)]].dropna())\n else:\n for columnHeader in columnHeaders:\n for data in dataFrame[columnHeader]:\n if scanFor == data:\n return random.choice(dataFrame[columnHeader].dropna())\n \n return random.choice(dataFrame[\"default-response\"].dropna())","sub_path":"Utility.py","file_name":"Utility.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"258766336","text":"import torch\nfrom torchvision.transforms import Compose, ToTensor\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections.abc import Iterable\nfrom functools import reduce\nfrom operator import mul\n\nfrom torch.distributions import Normal\nfrom torch.distributions import Bernoulli\nfrom torch.utils import checkpoint\n\nimport scipy\n\nfrom utils import *\nfrom activations import *\nimport copy\n\n\n########################################## non invertible mappings\n# \tbuild based on nn.Module\n#\tcontains polular neural networks\n#\tserve as deterministic mapping for stochastic mapping and flow transform\n########################################## \n\nclass LambdaLayer(nn.Module):\n\tdef __init__(self, lambd):\n\t\tsuper(LambdaLayer,self).__init__()\n\t\tif lambd is None: lambd = lambda x:x\n\t\tself.lambd = lambd\n\n\tdef forward(self,x):\n\t\treturn self.lambd(x)\n\nclass MLP(nn.Sequential):\n\tdef __init__(self,input_size, output_size, hidden_units, activation='relu',in_lambda=None,out_lambda=None):\n\t\tself.input_size = input_size\n\t\tself.output_size = output_size\n\n\t\tlayers = []\n\t\tif in_lambda: layers.append(LambdaLayer(in_lambda))\n\t\tfor in_size, out_size in zip([input_size]+hidden_units[:-1],hidden_units):\n\t\t\tlayers.append(nn.Linear(in_size,out_size))\n\t\t\tlayers.append(act_module(activation))\n\t\tlayers.append(nn.Linear(hidden_units[-1], output_size))\n\t\tif out_lambda: layers.append(LambdaLayer(out_lambda))\n\n\t\tsuper(MLP, self).__init__(*layers)\n\t\t\n\nclass ElementwiseParams(nn.Module):\n\n\tdef __init__(self, num_params, mode='interleaved'):\n\t\tsuper(ElementwiseParams, self).__init__()\n\t\tassert mode in {'interleaved','sequential'}\n\t\tself.num_params = num_params\n\t\tself.mode = mode\n\t\n\tdef forward(self,x):\n\t\tassert x.dim()==2, 'Expected input of shape (B,D)'\n\t\tif self.num_params 
!=1:\n\t\t\tassert x.shape[1] % self.num_params == 0\n\t\t\tdims = x.shape[1] //self.num_params\n\n\t\t\tif self.mode == 'interleaved':\n\t\t\t\tx = x.reshape(x.shape[0:1] + (self.num_params,dims))\n\t\t\t\tx = x.permute([0,2,1])\n\n\t\t\telif self.mode == 'sequential':\n\t\t\t\tx = x.reshape(x.shape[0:1] + (dims, self.num_params))\n\n\t\treturn x\n\nclass ElementwiseParams1d(nn.Module):\n\n\tdef __init__(self, num_params, mode='interleaved'):\n\t\tsuper(ElementwiseParams1d,self).__init__()\n\t\tassert mode in {'interleaved', 'sequential'}\n\t\tself.num_params = num_params\n\t\tself.mode = mode\n\n\tdef forward(self,x):\n\t\tassert x.dim()==3, 'Expected input of shape (B,D,L)'\n\t\tif self.num_params != 1:\n\t\t\tassert x.shape[1] % self.num_params ==0\n\t\t\tdims = x.shape[1] //self.num_params\n\n\t\t\tif self.mode == 'interleaved':\n\t\t\t\tx = x.reshape(x.shape[0:1] + (self.num_params,dims)+ x.shape[2:])\n\n\t\t\t\tx = x.permute([0,2,3,1])\n\n\t\t\telif self.mode == 'sequential':\n\t\t\t\tx = x.reshape(x.shape[0:1] + (dims, self.num_params) + x.shape[2:])\n\t\t\t\tx = x.permute([0,1,3,2])\n\n\t\treturn x\n\nclass ElementwiseParams2d(nn.Module):\n\n\tdef __init__(self, num_params, mode='interleaved'):\n\t\tsuper(ElementwiseParams2d, self).__init__()\n\t\tassert mode in {'interleaved', 'sequential'}\n\t\tself.num_params = num_params\n\t\tself.mode = mode\n\n\tdef forward(self, x):\n\t\tassert x.dim() == 4, 'Expected input of shape (B,C,H,W)'\n\t\tif self.num_params != 1:\n\t\t\tassert x.shape[1] % self.num_params == 0\n\t\t\tchannels = x.shape[1] // self.num_params\n\t\t\tif self.mode == 'interleaved':\n\t\t\t\tx = x.reshape(x.shape[0:1] + (self.num_params, channels) + x.shape[2:])\n\t\t\t\tx = x.permute([0,2,3,4,1])\n\t\t\telif self.mode == 'sequential':\n\t\t\t\tx = x.reshape(x.shape[0:1] + (channels, self.num_params) + x.shape[2:])\n\t\t\t\tx = x.permute([0,1,3,4,2])\n\t\treturn x\n\n\nclass DensLayer(nn.Module):\n\tdef __init__(self, in_channels, growth, dropout):\n\t\tsuper(DensLayer, self).__init__()\n\n\t\tlayers = []\n\n\t\tlayers.extend([\n\t\t\tnn.Conv2d(in_channels, in_channels, kernel_size=1,\n\t\t\t\tstride=1, padding=0, bias=True),\n\t\t\tnn.ReLU(inplace=True),])\n\n\t\tif dropout>0:\n\t\t\tlayers.append(nn.Dropout(p=dropout))\n\n\t\tlayers.extend([\n\t\t\tnn.Conv2d(in_channels,growth,kernel_size=3,\n\t\t\t\tstride=1,padding=1,bias=True),\n\t\t\tnn.ReLU(inplace=True)\n\t\t\t])\n\n\t\tself.nn = nn.Sequential(*layers)\n\n\tdef forward(self,x):\n\t\th = self.nn(x)\n\t\th = torch.cat([x,h],dim=1)\n\n\t\treturn h\n\n\nclass GatedConv2d(nn.Module):\n\tdef __init__(self, in_channels, out_channels, kernel_size, padding):\n\t\tsuper(GatedConv2d, self).__init__()\n\t\tself.in_channels = in_channels\n\t\tself.conv = nn.Conv2d(in_channels, out_channels*3,\n\t\t\tkernel_size=kernel_size,padding=padding)\n\n\tdef forward(self,x):\n\t\th = self.conv(x)\n\t\ta,b,c = torch.chunk(h,chunks=3,dim=1)\n\n\t\treturn a + b*torch.sigmoid(c)\n\n\nclass DenseBlock(nn.Sequential):\n\n\tdef __init__(self, in_channels, out_channels, depth, growth,\n\t\tdropout=0., gated_conv=False, zero_init=False):\n\t\tlayers = [DensLayer(in_channels + i*growth,growth,dropout) for i in range(depth)]\n\n\t\tif gated_conv:\n\t\t\tlayers.append(GatedConv2d(in_channels + depth*growth, out_channels, kernel_size=1, padding=0))\n\t\telse:\n\t\t\tlayers.append(nn.Conv2d(in_channels+depth*growth, out_channels,kernel_size=1,padding=0))\n\n\t\tif zero_init:\n\t\t\tnn.init.zeros_(layers[-1].weight)\n\t\t\tif 
hasattr(layers[-1],'bias'):\n\t\t\t\tnn.init.zeros_(layers[-1].bias)\n\n\t\tsuper(DenseBlock,self).__init__(*layers)\n\n\nclass ResidualDenseBlock(nn.Module):\n\tdef __init__(self, in_channels, out_channels, depth, growth,\n\t\tdropout=0., gated_conv=False, zero_init=False):\n\t\tsuper(ResidualDenseBlock,self).__init__()\n\n\t\tself.dense = DenseBlock(in_channels = in_channels,\n\t\t\tout_channels = out_channels,\n\t\t\tdepth = depth,\n\t\t\tgrowth = growth,\n\t\t\tdropout = dropout,\n\t\t\tgated_conv = gated_conv,\n\t\t\tzero_init = zero_init)\n\n\tdef forward(self, x):\n\t\treturn x + self.dense(x)\n\n\nclass DenseNet(nn.Sequential):\n\tdef __init__(self, in_channels, out_channels, num_blocks,\n\t\tmid_channels, depth, growth, dropout,\n\t\tgated_conv=False, zero_init=False):\n\n\t\tlayers = [nn.Conv2d(in_channels,mid_channels, kernel_size=1, padding=0)]+[ResidualDenseBlock(in_channels=mid_channels,\n\t\t\tout_channels=mid_channels,\n\t\t\tdepth=depth,\n\t\t\tgrowth=growth,\n\t\t\tdropout=dropout,\n\t\t\tgated_conv=gated_conv,\n\t\t\tzero_init=False) for _ in range(num_blocks)] + [nn.Conv2d(mid_channels,out_channels,kernel_size=1,padding=0)]\n\t\tif zero_init:\n\t\t\tnn.init.zeros_(layers[-1].weight)\n\t\t\tif hasattr(layers[-1],'bias'):\n\t\t\t\tnn.init.zeros_(layers[-1].bias)\n\n\t\tsuper(DenseNet,self).__init__(*layers)\n\nclass MultiscaleDenseNet(nn.Module):\n\tdef __init__(self, in_channels, out_channels, num_scales, num_blocks, mid_channels,\n\t\tdepth, growth, dropout, gated_conv=False, zero_init=False):\n\n\t\tsuper(MultiscaleDenseNet, self).__init__()\n\t\tassert num_scales >1\n\t\tself.num_scales = num_scales\n\n\n\t\tdef get_densenet(cin, cout, zinit=False):\n\t\t\treturn DenseNet(in_channels=cin,\n\t\t\t\tout_channels=cout,\n\t\t\t\tnum_blocks=num_blocks,\n\t\t\t\tmid_channels=mid_channels,\n\t\t\t\tdepth=depth,\n\t\t\t\tgrowth=growth,\n\t\t\t\tdropout=dropout,\n\t\t\t\tgated_conv=gated_conv,\n\t\t\t\tzero_init=zinit)\n\n\t\tself.down_in = get_densenet(in_channels, mid_channels)\n\n\t\tdown = []\n\t\tfor i in range(num_scales -1):\n\t\t\tdown.append(nn.Sequential(nn.Conv2d(mid_channels, mid_channels, kernel_size=2, padding=0,stride=2),\n\t\t\t\tget_densenet(mid_channels,mid_channels)))\n\n\t\tself.down = nn.ModuleList(down)\n\n\t\tup = []\n\n\t\tfor i in range(num_scales -1):\n\t\t\t# append to the local list `up`, which feeds self.up below\n\t\t\tup.append(nn.Sequential(get_densenet(mid_channels,mid_channels),\n\t\t\t\tnn.ConvTranspose2d(mid_channels, mid_channels, kernel_size=2, padding=1, stride=2)))\n\n\t\tself.up = nn.ModuleList(up)\n\n\t\tself.up_out = get_densenet(mid_channels, out_channels, zinit=zero_init)\n\n\n\tdef forward(self,x):\n\n\t\td = [self.down_in(x)]\n\n\t\tfor down_layer in self.down:\n\t\t\td.append(down_layer(d[-1]))\n\n\t\tu = [d[-1]]\n\t\tfor i, up_layer in enumerate(self.up):\n\t\t\tu.append(up_layer(u[-1])+d[self.num_scales -2-i])\n\n\t\treturn self.up_out(u[-1])\n\n\n\n\n\n\n### sequence network part \n\nclass DenseTransformerBlock(nn.Module):\n\n\tdef __init__(self, d_model, nhead, dim_feedforward=512, dropout=0.1,activation='gelu',kdim=None, vdim=None, attn_bias=True, checkpoint=False):\n\t\tsuper(DenseTransformerBlock, self).__init__()\n\t\tself.self_attn = nn.MultiheadAttention(d_model,nhead, dropout=dropout, kdim=kdim, vdim=vdim,bias=attn_bias)\n\n\t\tself.linear1 = nn.Linear(d_model, dim_feedforward)\n\t\tself.linear2 = nn.Linear(dim_feedforward, d_model)\n\n\t\tself.norm1 = nn.LayerNorm(d_model)\n\t\tself.norm2 = nn.LayerNorm(d_model)\n\t\tself.dropout1 = nn.Dropout(dropout)\n\t\tself.dropout2 = 
nn.Dropout(dropout)\n\n\t\tself.activation = act_module(activation)\n\t\tself.checkpoint = checkpoint\n\n\t\tself._reset_parameters()\n\n\n\tdef _reset_parameters(self):\n\t\tnn.init.normal_(self.linear1.weight, std=0.125/math.sqrt(self.linear1.weight.shape[1]))\n\t\tnn.init.normal_(self.linear2.weight, std=0.125/math.sqrt(self.linear2.weight.shape[1]))\n\n\t\tnn.init.zeros_(self.linear1.bias)\n\t\tnn.init.zeros_(self.linear2.bias)\n\n\t\tnn.init.normal_(self.self_attn.in_proj_weight, std = 0.1245/math.sqrt(self.self_attn.in_proj_weight.shape[1]))\n\t\tif not self.self_attn._qkv_same_embed_dim:\n\t\t\tnn.init.normal_(self.self_attn.q_proj_weight,std=0.125/math.sqrt(self.self_attn.q_proj_weight.shape[1]))\n\t\t\tnn.init.normal_(self.self_attn.k_proj_weight, std=0.125/math.sqrt(self.self_attn.k_proj_weight.shape[1]))\n\t\t\tnn.init.normal_(self.self_attn.v_proj_weight, std=0.125/math.sqrt(self.self_attn.v_proj_weight.shape[1]))\n\n\t\tif self.self_attn.in_proj_bias is not None:\n\t\t\tnn.init.zeros_(self.self_attn.in_proj_bias)\n\n\t\tnn.init.normal_(self.self_attn.out_proj.weight, std=0.125/math.sqrt(self.self_attn.out_proj.weight.shape[1]))\n\n\t\tif self.self_attn.out_proj.bias is not None:\n\t\t\tnn.init.zeros_(self.self_attn.out_proj.bias)\n\n\n\tdef _attn_block(self, x, attn_mask=None, key_padding_mask=None):\n\t\tx = self.norm1(x)\n\t\tx = self.self_attn(x,x,x, attn_mask=attn_mask, key_padding_mask=key_padding_mask)[0]\n\t\tx = self.dropout1(x)\n\t\treturn x\n\n\tdef _ff_block(self,x):\n\t\tx = self.norm2(x)\n\t\tx = self.linear2(self.activation(self.linear1(x)))\n\t\tx = self.dropout2(x)\n\t\treturn x\n\n\tdef _forward(self, x, attn_mask=None, key_padding_mask=None):\n\t\tax = self._attn_block(x, attn_mask=attn_mask,key_padding_mask=key_padding_mask)\n\t\tbx = self._ff_block(x+ax)\n\t\treturn x + ax+bx\n\n\tdef forward(self, x, attn_mask=None, key_padding_mask=None):\n\t\tif not self.checkpoint:\n\t\t\treturn self._forward(x,attn_mask, key_padding_mask)\n\t\telse:\n\t\t\tx.requires_grad_(True)\n\t\t\treturn checkpoint.checkpoint(self._forward, x, attn_mask, key_padding_mask)\n\ndef _get_clones(module, N):\n\treturn nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\nclass DenseTransformer(nn.Module):\n\n\tdef __init__(self, d_model=512, nhead=8,\n\t\tnum_layers=6, dim_feedforward=512, dropout=0.1,\n\t\tactivation='gelu',kdim=None, vdim=None,\n\t\tattn_bias=True, checkpoint_blocks=False):\n\n\t\tsuper(DenseTransformer, self).__init__()\n\n\t\tdecoder_layer = DenseTransformerBlock(d_model=d_model,nhead=nhead,dim_feedforward=dim_feedforward,\n\t\t\tdropout=dropout,activation=activation,kdim=kdim,vdim=vdim,attn_bias=attn_bias,checkpoint=checkpoint_blocks)\n\n\t\tself.layers = _get_clones(decoder_layer, num_layers)\n\t\tself.out_norm = nn.LayerNorm(d_model)\n\n\t\tself.num_layers = num_layers\n\t\tself.d_model = d_model\n\t\tself.nhead = nhead\n\n\t\tself._reset_parameters()\n\n\n\tdef forward(self, x, key_padding_mask=None):\n\t\tif x.size(2) != self.d_model:\n\t\t\traise RuntimeError('the feature number of src and tgt must be equal to d_model')\n\n\t\tattn_mask = self.generate_square_subsequent_mask(x.shape[0]).to(x.device)\n\n\t\tfor decoder_layer in self.layers:\n\t\t\tx = decoder_layer(x, attn_mask=attn_mask, key_padding_mask=key_padding_mask)\n\n\t\treturn self.out_norm(x)\n\n\tdef generate_square_subsequent_mask(self, sz):\n\n\t\tmask = (torch.triu(torch.ones(sz,sz))==1).transpose(0,1)\n\t\tmask = mask.float().masked_fill(mask ==0, float('-inf')).masked_fill(mask ==1, 
float(0.0))\n\n\t\treturn mask\n\n\tdef _reset_parameters(self):\n\n\t\tfor p in self.parameters():\n\t\t\tif p.dim()>1:\n\t\t\t\tnn.init.xavier_uniform_(p)\n\nclass PositionalEncodeingImage(nn.Module):\n\n\tdef __init__(self, image_shape, embedding_dim):\n\t\tsuper(PositionalEncodeingImage, self).__init__()\n\t\tassert len(image_shape) == 3, 'image shape should have length 3: (C,H,W)'\n\t\tself.image_shape = image_shape\n\t\tself.embedding_dim = embedding_dim\n\n\t\tc,h,w = image_shape\n\t\tself.encode_c = nn.Parameter(torch.Tensor(1,c,1,1,embedding_dim))\n\t\tself.encode_h = nn.Parameter(torch.Tensor(1,1,h,1,embedding_dim))\n\t\tself.encode_w = nn.Parameter(torch.Tensor(1,1,1,w,embedding_dim))\n\n\t\tself.reset_parameters()\n\n\tdef reset_parameters(self):\n\n\t\tnn.init.normal_(self.encode_c, std=0.125/math.sqrt(3*self.embedding_dim))\n\t\tnn.init.normal_(self.encode_h, std=0.125/math.sqrt(3*self.embedding_dim))\n\t\tnn.init.normal_(self.encode_w, std=0.125/math.sqrt(3*self.embedding_dim))\n\n\tdef forward(self,x):\n\t\treturn x + self.encode_c + self.encode_h + self.encode_w\n\nclass AutoregressiveShift(nn.Module):\n\n\tdef __init__(self, embed_dim):\n\t\tsuper(AutoregressiveShift, self).__init__()\n\t\tself.embed_dim = embed_dim\n\t\tself.first_token = nn.Parameter(torch.Tensor(1,1,embed_dim))\n\t\tself._reset_parameters()\n\n\tdef _reset_parameters(self):\n\t\tnn.init.xavier_uniform_(self.first_token)\n\n\tdef forward(self,x):\n\t\tfirst_token = self.first_token.expand(1,x.shape[1],self.embed_dim)\n\t\treturn torch.cat([first_token, x[:-1]], dim=0)\n\ndef _prep_zigzag_cs(channels, height, width):\n\n\tdiagonals = [[] for i in range(height+width-1)]\n\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tsum = i+j\n\t\t\tif(sum%2==0):\n\t\t\t\tdiagonals[sum].insert(0,(i,j))\n\t\t\telse:\n\t\t\t\tdiagonals[sum].append((i,j))\n\n\tidx_list = []\n\tfor d in diagonals:\n\t\tfor idx in d:\n\t\t\tfor c in range(channels):\n\t\t\t\tidx_list.append((c,)+idx)\n\n\tidx0,idx1,idx2 = zip(*idx_list)\n\treturn idx0,idx1,idx2\n\nclass Image2Seq(nn.Module):\n\n\tdef __init__(self, autoregressive_order, image_shape):\n\t\tassert autoregressive_order in {'cwh','whc','zigzag_cs'}\n\t\tsuper(Image2Seq, self).__init__()\n\t\tself.autoregressive_order = autoregressive_order\n\t\tself.channels = image_shape[0]\n\t\tself.height = image_shape[1]\n\t\tself.width = image_shape[2]\n\t\tif autoregressive_order == 'zigzag_cs':\n\t\t\tself.idx0, self.idx1, self.idx2 = _prep_zigzag_cs(self.channels, self.height, self.width)\n\n\tdef forward(self, x):\n\t\tb, dim = x.shape[0], x.shape[-1]\n\t\tl = x.shape[1:-1].numel()\n\t\tif self.autoregressive_order == 'whc':\n\n\t\t\tx = x.permute([1,2,3,0,4])\n\n\t\t\tx = x.reshape(l,b,dim)\n\n\t\telif self.autoregressive_order == 'cwh':\n\n\t\t\tx = x.permute([2,3,1,0,4])\n\n\t\t\tx = x.reshape(l,b,dim)\n\n\t\telif self.autoregressive_order == 'zigzag_cs':\n\n\t\t\tx = x[:, self.idx0, self.idx1, self.idx2, :]\n\n\t\t\tx = x.permute([1,0,2])\n\n\t\treturn x\n\nclass Seq2Image(nn.Module):\n\n\tdef __init__(self, autoregressive_order, image_shape):\n\t\tassert autoregressive_order in {'cwh','whc','zigzag_cs'}\n\t\tsuper(Seq2Image, self).__init__()\n\t\tself.autoregressive_order = autoregressive_order\n\t\tself.channels = image_shape[0]\n\t\tself.height = image_shape[1]\n\t\tself.width = image_shape[2]\n\t\tif autoregressive_order == 'zigzag_cs':\n\t\t\tself.idx0, self.idx1, self.idx2 = _prep_zigzag_cs(self.channels, self.height, self.width)\n\n\tdef 
forward(self,x):\n\t\tb, dim = x.shape[1], x.shape[2]\n\t\tif self.autoregressive_order == 'whc':\n\t\t\tx = x.reshape(self.channels, self.height, self.width, b, dim)\n\n\t\t\tx = x.permute([3,0,1,2,4])\n\n\t\telif self.autoregressive_order == 'cwh':\n\t\t\tx = x.reshape(self.height, self.width, self.channels, b,dim)\n\t\t\tx = x.permute([3,2,0,1,4])\n\n\t\telif self.autoregressive_order == 'zigzag_cs':\n\t\t\tx = x.permute([1,0,2])\n\t\t\ty = torch.empty((x.shape[0],self.channels, self.height, self.width, x.shape[-1]),dtype=x.dtype, device=x.device)\n\t\t\ty[:, self.idx0,self.idx1, self.idx2,:] = x\n\n\t\t\tx = y\n\n\t\treturn x\n\nclass DenseTransformer2d(nn.Module):\n\n\tdef __init__(self, image_shape, output_dim, num_bits,\n\t\tautoregressive_order='cwh',d_model=512, nhead=8,\n\t\tnum_layers=6, dim_feedforward=2048, dropout=0.1,\n\t\tactivation='relu',kdim=None, vdim=None, \n\t\tattn_bias=True, output_bias=True,\n\t\tcheckpoint_blocks=False,\n\t\tin_lambda = lambda x:x,\n\t\tout_lambda = lambda x:x):\n\n\t\tsuper(DenseTransformer2d, self).__init__()\n\t\tself.image_shape = torch.Size(image_shape)\n\t\tself.autoregressive_order = autoregressive_order\n\t\tself.d_model = d_model\n\t\tself.num_layers = num_layers\n\n\n\t\tself.encode = nn.Sequential(LambdaLayer(in_lambda),\n\t\t\tnn.Embedding(2**num_bits,d_model),\n\t\t\tPositionalEncodeingImage(image_shape=image_shape, embedding_dim=d_model))\n\n\t\tself.im2seq = Image2Seq(autoregressive_order,image_shape)\n\t\tself.seq2im = Seq2Image(autoregressive_order, image_shape)\n\t\tself.ar_shift = AutoregressiveShift(d_model)\n\n\t\tself.transformer = DenseTransformer(d_model=d_model,\n\t\t\tnhead=nhead,\n\t\t\tnum_layers=num_layers,\n\t\t\tdim_feedforward=dim_feedforward,\n\t\t\tdropout=dropout,\n\t\t\tactivation=activation,\n\t\t\tkdim=kdim,\n\t\t\tvdim=vdim,\n\t\t\tattn_bias=attn_bias,\n\t\t\tcheckpoint_blocks=checkpoint_blocks)\n\n\t\tself.out_linear = nn.Linear(d_model, output_dim, bias=output_bias)\n\t\tself.out_lambda = LambdaLayer(out_lambda)\n\n\t\tself._reset_parameters()\n\n\tdef _reset_parameters(self):\n\n\t\tnn.init.zeros_(self.out_linear.weight)\n\t\tif self.out_linear.bias is not None:\n\t\t\tnn.init.zeros_(self.out_linear.bias)\n\n\t\tnn.init.normal_(self.encode._modules['1'].weight, std=0.125/math.sqrt(self.d_model))\n\n\tdef forward(self,x):\n\n\t\tx = self.encode(x.long())\n\t\tx = self.im2seq(x)\n\t\tx = self.ar_shift(x)\n\t\tx = self.transformer(x)\n\t\tx = self.out_linear(x)\n\t\tx = self.seq2im(x)\n\t\treturn self.out_lambda(x)\n\n\n# layer = DenseTransformer2d(image_shape=(3,32,32), output_dim=64, num_bits=8,\n# \t\tautoregressive_order='cwh',d_model=128, nhead=4,\n# \t\tnum_layers=1, dim_feedforward=128, dropout=0.1,\n# \t\tactivation='relu',kdim=None, vdim=None, \n# \t\tattn_bias=True, output_bias=True,\n# \t\tcheckpoint_blocks=False,\n# \t\tin_lambda = lambda x:x,\n# \t\tout_lambda = lambda x:x).cuda()\n# layer(x.cuda())\n\n\nclass PositionalEncoding1d(nn.Module):\n\n\tdef __init__(self, size, embedding_dim):\n\t\tsuper(PositionalEncoding1d, self).__init__()\n\t\tself.size = size\n\t\tself.embedding_dim = embedding_dim\n\t\tself.encode_l = nn.Parameter(torch.Tensor(size,1,embedding_dim))\n\t\tself.reset_parameters()\n\n\tdef reset_parameters(self):\n\n\t\tnn.init.normal_(self.encode_l, std=0.125/math.sqrt(self.embedding_dim))\n\n\tdef forward(self,x):\n\t\treturn x + self.encode_l\n\nclass PositionalEncoding1d_no_embedding(nn.Module):\n\n\tdef __init__(self, size, 
embedding_dim):\n\t\tsuper(PositionalEncoding1d_no_embedding, self).__init__()\n\t\tself.size = size\n\t\tself.embedding_dim = embedding_dim\n\t\tself.encode_l = nn.Parameter(torch.Tensor(size,embedding_dim))\n\t\tself.reset_parameters()\n\n\tdef reset_parameters(self):\n\n\t\tnn.init.normal_(self.encode_l, std=0.125/math.sqrt(self.embedding_dim))\n\n\tdef forward(self,x):\n\t\treturn x + self.encode_l\n\n\nclass PositionalDenseTransformer(nn.Module):\n\tdef __init__(self, l_input=50, d_input=2, d_output=2, d_model=512, nhead=8,\n\t\tnum_layers=6, dim_feedforward=512, dropout=0.1,\n\t\tactivation='gelu',kdim=None, vdim=None,\n\t\tattn_bias=True, checkpoint_blocks=False,\n\t\tin_lambda= lambda x:x,\n\t\tout_lambda = lambda x:x):\n\n\t\tsuper(PositionalDenseTransformer,self).__init__()\n\n\t\tdecoder_layer = DenseTransformerBlock(d_model=d_model,\n\t\t\t\t\t\t\t\t\t\tnhead=nhead,\n\t\t\t\t\t\t\t\t\t\tdim_feedforward=dim_feedforward,\n\t\t\t\t\t\t\t\t\t\tdropout=dropout,\n\t\t\t\t\t\t\t\t\t\tactivation=activation,\n\t\t\t\t\t\t\t\t\t\tattn_bias=attn_bias,\n\t\t\t\t\t\t\t\t\t\tcheckpoint=checkpoint_blocks)\n\n\t\tself.in_lambda = LambdaLayer(in_lambda)\n\t\tself.in_linear = nn.Linear(d_input, d_model)\n\t\tself.encode = PositionalEncoding1d(l_input, d_model)\n\t\tself.layers = _get_clones(decoder_layer, num_layers)\n\t\tself.out_norm = nn.LayerNorm(d_model)\n\t\tself.out_linear = nn.Linear(d_model, d_output)\n\t\tself.out_lambda = LambdaLayer(out_lambda)\n\n\t\tself.num_layers = num_layers\n\t\tself.d_model = d_model\n\t\tself.nhead = nhead\n\n\t\tself._reset_parameters()\n\n\tdef forward(self,x):\n\n\t\tx = self.in_lambda(x)\n\t\tx = x.permute(2,0,1)\n\n\t\tx = self.in_linear(x)\n\t\tx = self.encode(x)\n\n\t\tfor decoder_layer in self.layers:\n\t\t\tx = decoder_layer(x, attn_mask=None, key_padding_mask=None)\n\n\t\tx = self.out_norm(x)\n\t\tx = self.out_linear(x)\n\n\t\tx = x.permute(1,2,0)\n\t\tx = self.out_lambda(x)\n\n\t\treturn x\n\n\tdef _reset_parameters(self):\n\n\t\tfor decoder_layer in self.layers:\n\t\t\tdecoder_layer.linear2.weight.data /= math.sqrt(2*self.num_layers)\n\t\t\tdecoder_layer.self_attn.out_proj.weight.data /= math.sqrt(2*self.num_layers)\n\n\n\t\tnn.init.zeros_(self.out_linear.weight)\n\t\tif self.out_linear.bias is not None:\n\t\t\tnn.init.zeros_(self.out_linear.bias)\n\n\nclass PositionalDenseTransformer_no_embedding(nn.Module):\n\tdef __init__(self, l_input=50, d_input=2, d_output=2, d_model=512, nhead=8,\n\t\tnum_layers=6, dim_feedforward=512, dropout=0.1,\n\t\tactivation='gelu',kdim=None, vdim=None,\n\t\tattn_bias=True, checkpoint_blocks=False,\n\t\tin_lambda= lambda x:x,\n\t\tout_lambda = lambda x:x):\n\n\t\tsuper(PositionalDenseTransformer_no_embedding,self).__init__()\n\n\t\tdecoder_layer = DenseTransformerBlock(d_model=d_model,\n\t\t\t\t\t\t\t\t\t\tnhead=nhead,\n\t\t\t\t\t\t\t\t\t\tdim_feedforward=dim_feedforward,\n\t\t\t\t\t\t\t\t\t\tdropout=dropout,\n\t\t\t\t\t\t\t\t\t\tactivation=activation,\n\t\t\t\t\t\t\t\t\t\tattn_bias=attn_bias,\n\t\t\t\t\t\t\t\t\t\tcheckpoint=checkpoint_blocks)\n\n\t\tself.in_lambda = LambdaLayer(in_lambda)\n\t\tself.in_linear = nn.Linear(d_input, d_model)\n\t\tself.encode = PositionalEncoding1d_no_embedding(l_input, d_model)\n\t\tself.layers = _get_clones(decoder_layer, num_layers)\n\t\tself.out_norm = nn.LayerNorm(d_model)\n\t\tself.out_linear = nn.Linear(d_model, d_output)\n\t\tself.out_lambda = LambdaLayer(out_lambda)\n\n\t\tself.num_layers = num_layers\n\t\tself.d_model = d_model\n\t\tself.nhead = 
nhead\n\n\t\tself._reset_parameters()\n\n\tdef forward(self,x):\n\n\t\tx = self.in_lambda(x)\n\n\t\tx = self.in_linear(x)\n\t\tx = self.encode(x)\n\n\t\tfor decoder_layer in self.layers:\n\t\t\tx = decoder_layer(x, attn_mask=None, key_padding_mask=None)\n\n\t\tx = self.out_norm(x)\n\t\tx = self.out_linear(x)\n\n\t\tx = self.out_lambda(x)\n\n\t\treturn x\n\n\tdef _reset_parameters(self):\n\n\t\tfor decoder_layer in self.layers:\n\t\t\tdecoder_layer.linear2.weight.data /= math.sqrt(2*self.num_layers)\n\t\t\tdecoder_layer.self_attn.out_proj.weight.data /= math.sqrt(2*self.num_layers)\n\n\n\t\tnn.init.zeros_(self.out_linear.weight)\n\t\tif self.out_linear.bias is not None:\n\t\t\tnn.init.zeros_(self.out_linear.bias)\n\n\n\n\n\n\n\n\n############### pixel CNN part\n\ndef mask_conv2d_spatial(mask_type, height, width):\n\n\tmask = torch.ones([1,1,height,width])\n\tmask[:,:, height//2, width//2+(mask_type == 'B'):] = 0\n\tmask[:, :, height//2+1:] = 0\n\n\treturn mask\n\n\n\ndef mask_channels(mask_type, in_channels, out_channels, data_channels=3):\n\n\tin_factor = in_channels // data_channels +1\n\tout_factor = out_channels // data_channels +1\n\n\tbase_mask = torch.ones([data_channels,data_channels])\n\tif mask_type =='A':\n\t\tbase_mask = base_mask.tril(-1)\n\n\telse:\n\t\tbase_mask = base_mask.tril(0)\n\n\tmask_p1 = torch.cat([base_mask]*in_factor, dim=1)\n\tmask_p2 = torch.cat([mask_p1]*out_factor, dim=0)\n\n\tmask = mask_p2[0:out_channels,0:in_channels]\n\treturn mask\n\n\ndef mask_conv2d(mask_type, in_channels, out_channels, height, width, data_channels=3):\n\n\tmask = torch.ones([out_channels,in_channels,height,width])\n\tmask[:,:, height//2, width//2] = mask_channels(mask_type,in_channels,out_channels,data_channels)\n\tmask[:,:, height//2, width//2 +1:] = 0\n\n\tmask[:,:,height//2+1:]=0\n\treturn mask\n\nclass _MaskedConv2d(nn.Conv2d):\n\n\tdef register_mask(self, mask):\n\n\t\tself.register_buffer('mask',mask)\n\n\tdef forward(self, x):\n\t\tself.weight.data *= self.mask \n\t\treturn super(_MaskedConv2d,self).forward(x)\n\nclass SpatialMaskedConv2d(_MaskedConv2d):\n\n\tdef __init__(self, *args, mask_type, **kwargs):\n\t\tsuper(SpatialMaskedConv2d,self).__init__(*args, **kwargs)\n\t\tassert mask_type in {'A','B'}\n\t\t_,_, height, width = self.weight.size()\n\t\tmask = mask_conv2d_spatial(mask_type,height,width)\n\t\tself.register_mask(mask)\n\n\nclass MaskedConv2d(_MaskedConv2d):\n\n\tdef __init__(self, *args, mask_type, data_channels=3, **kwargs):\n\t\tsuper(MaskedConv2d,self).__init__(*args, **kwargs)\n\t\tassert mask_type in {'A','B'}\n\t\tout_channels, in_channels, height, width = self.weight.size()\n\n\t\tmask = mask_conv2d(mask_type, in_channels, out_channels, height, width, data_channels)\n\t\tself.register_mask(mask)\n\n\nclass MaskedResidualBlock2d(nn.Module):\n\n\tdef __init__(self, h, kernel_size=3, data_channels=3):\n\t\tsuper(MaskedResidualBlock2d,self).__init__()\n\n\t\tself.conv1 = MaskedConv2d(2*h,h, kernel_size=1, mask_type='B', data_channels=data_channels)\n\t\tself.conv2 = MaskedConv2d(h, h, kernel_size=kernel_size,padding=kernel_size//2,mask_type='B',data_channels=data_channels)\n\t\tself.conv3 = MaskedConv2d(h,2*h, kernel_size=1, mask_type='B', data_channels=data_channels)\n\n\tdef forward(self,x):\n\t\tidentity = x\n\n\t\tx = self.conv1(F.relu(x))\n\t\tx = self.conv2(F.relu(x))\n\t\tx = self.conv3(F.relu(x))\n\n\t\treturn x + identity\n\n\nclass SpatialMaskedResidualBlock2d(nn.Module):\n\tdef __init__(self, h, 
kernel_size=3):\n\t\tsuper(SpatialMaskedResidualBlock2d,self).__init__()\n\t\tself.conv1 = nn.Conv2d(2*h, h, kernel_size=1)\n\t\tself.conv2 = SpatialMaskedConv2d(h,h,kernel_size=kernel_size,padding=kernel_size//2,mask_type='B')\n\t\tself.conv3 = nn.Conv2d(h, 2*h, kernel_size=1)\n\n\tdef forward(self,x):\n\t\tidentity = x\n\n\t\tx = self.conv1(F.relu(x))\n\t\tx = self.conv2(F.relu(x))\n\t\tx = self.conv3(F.relu(x))\n\n\t\treturn x+identity\n\n\nclass PixelCNN(nn.Sequential):\n\n\tdef __init__(self, in_channels, num_params, filters=128, num_blocks=15, output_filters=1024, kernel_size=3, kernel_size_in=7, init_transforms=lambda x: 2*x-1):\n\n\t\tlayers = [LambdaLayer(init_transforms)]+\\\n\t\t\t[MaskedConv2d(in_channels, 2*filters, kernel_size=kernel_size_in,padding=kernel_size_in//2, mask_type='A', data_channels=in_channels)]+\\\n\t\t\t[MaskedResidualBlock2d(filters, data_channels=in_channels,kernel_size=kernel_size) for _ in range(num_blocks)] +\\\n\t\t\t[nn.ReLU(True), MaskedConv2d(2*filters, output_filters, kernel_size=1,mask_type='B',data_channels=in_channels)]+\\\n\t\t\t[nn.ReLU(True),MaskedConv2d(output_filters, num_params*in_channels, kernel_size=1, mask_type='B',data_channels=in_channels)]+\\\n\t\t\t[ElementwiseParams2d(num_params)]\n\n\t\tsuper(PixelCNN, self).__init__(*layers)\n\n\n# layer = PixelCNN(in_channels=3, num_params=5, filters=5, num_blocks=2, output_filters=15,kernel_size=3, kernel_size_in=3)\n\n\n\n########### MADE part\n\nclass MaskedLinear(nn.Linear):\n\n\tdef __init__(self,\n\t\tin_degrees,\n\t\tout_features,\n\t\tdata_features,\n\t\trandom_mask=False,\n\t\trandom_seed=None,\n\t\tis_output=False,\n\t\tdata_degrees=None,\n\t\tbias=True):\n\n\t\tif is_output:\n\t\t\tassert data_degrees is not None\n\t\t\tassert len(data_degrees) == data_features\n\n\t\tsuper(MaskedLinear, self).__init__(in_features=len(in_degrees),\n\t\t\tout_features=out_features,\n\t\t\tbias=bias)\n\n\t\tself.out_features = out_features\n\t\tself.data_features = data_features\n\t\tself.is_output = is_output\n\n\t\tmask, out_degrees = self.get_mask_and_degrees(in_degrees=in_degrees,\n\t\t\tdata_degrees=data_degrees,\n\t\t\trandom_mask=random_mask,\n\t\t\trandom_seed=random_seed)\n\n\t\tself.register_buffer('mask',mask)\n\t\tself.register_buffer('degrees',out_degrees)\n\n\t@staticmethod\n\tdef get_data_degrees(in_features, random_order=False, random_seed=None):\n\t\tif random_order:\n\t\t\trng = np.random.RandomState(random_seed)\n\t\t\treturn torch.from_numpy(rng.permutation(in_features)+1)\n\n\t\telse:\n\t\t\treturn torch.arange(1,in_features+1)\n\n\tdef get_mask_and_degrees(self,in_degrees, data_degrees,random_mask, random_seed):\n\t\tif self.is_output:\n\t\t\tout_degrees = repeat_rows(data_degrees, self.out_features//self.data_features)\n\t\t\tmask = (out_degrees[...,None]>in_degrees).float()\n\n\t\telse:\n\t\t\tif random_mask:\n\t\t\t\tmin_in_degree = torch.min(in_degrees).item()\n\t\t\t\tmin_in_degree = min(min_in_degree,self.data_features-1)\n\t\t\t\trng = np.random.RandomState(random_seed)\n\t\t\t\tout_degrees = torch.from_numpy(rng.randint(min_in_degree,\n\t\t\t\t\tself.data_features,\n\t\t\t\t\tsize=[self.out_features]))\n\n\t\t\telse:\n\t\t\t\tmax_ = max(1,self.data_features-1)\n\t\t\t\tmin_ = min(1,self.data_features-1)\n\t\t\t\tout_degrees = torch.arange(self.out_features)%max_ + min_\n\n\t\t\tmask = (out_degrees[...,None] >= in_degrees).float()\n\n\t\treturn mask, out_degrees\n\n\tdef update_mask_and_degrees(self,in_degrees,data_degrees,random_mask,random_seed):\n\n\t\tmask, out_degrees 
= self.get_mask_and_degrees(in_degrees=in_degrees,\n\t\t\tdata_degrees=data_degrees,random_mask=random_mask,random_seed=random_seed)\n\n\t\tself.mask.data.copy_(mask)\n\t\tself.degrees.data.copy_(out_degrees)\n\n\tdef forward(self,x):\n\n\t\treturn F.linear(x, self.weight*self.mask, self.bias)\n\n\nclass MADE_Old(nn.Sequential):\n\n\tdef __init__(self, features, num_params, hidden_features, random_order=False, random_mask=False,\n\t\trandom_seed=None, activation='relu',dropout_prob=0.0,batch_norm=False):\n\n\t\tlayers = []\n\n\t\tdata_degrees = MaskedLinear.get_data_degrees(features, random_order=random_order,random_seed=random_seed)\n\t\tin_degrees = copy.deepcopy(data_degrees)\n\t\tfor i,out_features in enumerate(hidden_features):\n\t\t\tlayers.append(MaskedLinear(in_degrees=in_degrees,out_features=out_features,\n\t\t\t\tdata_features=features,random_mask=random_mask,random_seed=random_seed+i if random_seed is not None else None,\n\t\t\t\tis_output=False))\n\n\n\t\t\tin_degrees = layers[-1].degrees\n\t\t\tif batch_norm:\n\t\t\t\tlayers.append(nn.BatchNorm1d(out_features))\n\t\t\tlayers.append(act_module(activation))\n\t\t\tif dropout_prob >0.0:\n\t\t\t\tlayers.append(nn.Dropout(dropout_prob))\n\n\n\t\tlayers.append(MaskedLinear(in_degrees=in_degrees,\n\t\t\tout_features=features*num_params,data_features=features,random_mask=random_mask,\n\t\t\trandom_seed=random_seed,is_output=True,data_degrees=data_degrees))\n\n\t\tlayers.append(ElementwiseParams(num_params, mode='sequential'))\n\n\t\tsuper(MADE_Old, self).__init__(*layers)\n\n\nclass MADE(nn.Sequential):\n\n\tdef __init__(self, features, num_params, hidden_features, random_order=False, random_mask=False,\n\t\trandom_seed=None, activation='relu',dropout_prob=0.0,batch_norm=False):\n\n\t\tlayers = []\n\n\t\tdata_degrees = MaskedLinear.get_data_degrees(features, random_order=random_order,random_seed=random_seed)\n\t\tin_degrees = copy.deepcopy(data_degrees)\n\t\tfor i,out_features in enumerate(hidden_features):\n\t\t\tlayers.append(MaskedLinear(in_degrees=in_degrees,out_features=out_features,\n\t\t\t\tdata_features=features,random_mask=random_mask,random_seed=random_seed+i if random_seed is not None else None,\n\t\t\t\tis_output=False))\n\n\n\t\t\tin_degrees = layers[-1].degrees\n\t\t\tif batch_norm:\n\t\t\t\tlayers.append(nn.BatchNorm1d(out_features))\n\t\t\tlayers.append(act_module(activation))\n\t\t\tif dropout_prob >0.0:\n\t\t\t\tlayers.append(nn.Dropout(dropout_prob))\n\n\n\t\tlayers.append(MaskedLinear(in_degrees=in_degrees,\n\t\t\tout_features=features*num_params,data_features=features,random_mask=random_mask,\n\t\t\trandom_seed=random_seed,is_output=True,data_degrees=data_degrees))\n\n\t\t# layers.append(ElementwiseParams(num_params, mode='sequential'))\n\n\t\tsuper(MADE, self).__init__(*layers)\n\nclass AgnosticMADE(MADE):\n\n\tdef __init__(self, features, num_params, hidden_features, order_agnostic=True,\n\t\tconnect_agnostic=True, num_masks=16, activation='relu', dropout_prob=0.0, batch_norm=False):\n\n\t\tself.features = features\n\t\tself.order_agnostic = order_agnostic\n\t\tself.connect_agnostic = connect_agnostic\n\t\tself.num_masks = num_masks\n\t\tself.current_mask = 0\n\n\t\tsuper(AgnosticMADE, self).__init__(features=features,num_params=num_params,\n\t\t\thidden_features=hidden_features,random_order=order_agnostic,random_mask=connect_agnostic,\n\t\t\trandom_seed=self.current_mask,activation=activation,dropout_prob=dropout_prob,\n\t\t\tbatch_norm=batch_norm)\n\n\tdef update_masks(self):\n\t\tself.current_mask = 
(self.current_mask+1)%self.num_masks\n\n\t\tdata_degrees = MaskedLinear.get_data_degrees(self.features,random_order=self.order_agnostic,\n\t\t\trandom_seed=self.current_mask)\n\n\t\tin_degrees = copy.deepcopy(data_degrees)\n\t\tfor module in self.modules():\n\t\t\tif isinstance(module, MaskedLinear):\n\t\t\t\tmodule.update_mask_and_degrees(in_degrees=in_degrees,data_degrees=data_degrees,\n\t\t\t\t\trandom_mask=self.connect_agnostic,random_seed=self.current_mask)\n\n\t\t\t\tin_degrees = module.degrees\n\n\tdef forward(self,x):\n\t\tif self.num_masks>1: self.update_masks()\n\t\treturn super(AgnosticMADE,self).forward(x)\n\n\n\n\n\n### pure transformer\n\nclass DecoderOnlyTransformerBlock(nn.Module):\n\n\tdef __init__(self, d_model, nhead, dim_feedforward=2048,dropout=0.1,activation='relu',\n\t\tkdim=None,vdim=None,attn_bias=True, checkpoint=False):\n\t\tsuper(DecoderOnlyTransformerBlock,self).__init__()\n\t\tself.self_attn = nn.MultiheadAttention(d_model,nhead, dropout=dropout, kdim=kdim,vdim=vdim,bias=attn_bias)\n\n\t\tself.linear1 = nn.Linear(d_model,dim_feedforward)\n\t\tself.dropout = nn.Dropout(dropout)\n\t\tself.linear2 = nn.Linear(dim_feedforward,d_model)\n\n\t\tself.norm1 = nn.LayerNorm(d_model)\n\t\tself.norm2 = nn.LayerNorm(d_model)\n\t\tself.dropout1 = nn.Dropout(dropout)\n\t\tself.dropout2 = nn.Dropout(dropout)\n\n\t\tself.activation = act_module(activation)\n\t\tself.checkpoint = checkpoint\n\n\tdef _attn_block(self, x, attn_mask=None, key_padding_mask=None):\n\t\tx2 = self.self_attn(x,x,x,attn_mask=attn_mask,key_padding_mask=key_padding_mask)[0]\n\t\tx = x + self.dropout1(x2)\n\t\tx = self.norm1(x)\n\t\treturn x\n\n\tdef _ff_block(self,x,attn_mask=None, key_padding_mask=None):\n\t\tx2 = self.linear2(self.dropout(self.activation(self.linear1(x))))\n\t\tx = x + self.dropout2(x2)\n\t\tx = self.norm2(x)\n\t\treturn x\n\n\tdef _forward(self, x,attn_mask=None, key_padding_mask=None):\n\t\tx = self._attn_block(x, attn_mask=attn_mask, key_padding_mask=key_padding_mask)\n\t\tx = self._ff_block(x)\n\n\t\treturn x\n\n\tdef forward(self, x, attn_mask=None, key_padding_mask=None):\n\t\tif not self.checkpoint:\n\t\t\treturn self._forward(x,attn_mask, key_padding_mask)\n\t\telse:\n\t\t\tx.requires_grad_(True)\n\t\t\treturn checkpoint.checkpoint(self._forward, x, attn_mask, key_padding_mask)\n\nclass DecoderOnlyTransformer(nn.Module):\n\tdef __init__(self, d_model=512, nhead=8,\n\t\tnum_layers=6, dim_feedforward=2048, dropout=0.1, activation='relu', kdim=None,\n\t\tvdim=None,attn_bias=True, checkpoint_blocks=False):\n\n\t\tsuper(DecoderOnlyTransformer,self).__init__()\n\n\t\tdecoder_layer = DecoderOnlyTransformerBlock(d_model=d_model,\n\t\t\tnhead=nhead,dim_feedforward=dim_feedforward,dropout=dropout,activation=activation,\n\t\t\tkdim=kdim,vdim=vdim,attn_bias=attn_bias,checkpoint=checkpoint_blocks)\n\n\t\tself.layers = _get_clones(decoder_layer, num_layers)\n\t\tself.out_norm = nn.LayerNorm(d_model)\n\n\t\tself._reset_parameters()\n\n\t\tself.d_model = d_model\n\t\tself.nhead = nhead\n\n\n\tdef forward(self, x, key_padding_mask=None):\n\t\tif x.size(2) != self.d_model:\n\t\t\traise RuntimeError('the feature number of src and tgt must be equal to d_model')\n\n\t\tattn_mask = self.generate_square_subsequent_mask(x.shape[0]).to(x.device)\n\n\t\tfor decoder_layer in self.layers:\n\t\t\tx = decoder_layer(x,attn_mask=attn_mask,key_padding_mask=key_padding_mask)\n\n\t\treturn self.out_norm(x)\n\n\tdef generate_square_subsequent_mask(self, sz):\n\t\tmask = 
(torch.triu(torch.ones(sz,sz))==1).transpose(0,1)\n\t\tmask = mask.float().masked_fill(mask==0, float('-inf')).masked_fill(mask==1,float(0.0))\n\t\treturn mask \n\n\tdef _reset_parameters(self):\n\n\t\tfor p in self.parameters():\n\t\t\tif p.dim() > 1:\n\t\t\t\tnn.init.xavier_uniform_(p)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n########################################## 2 distributions\n# \tbuild based on nn.Module\n#\tserve as stochastic mapping in flow transform\n########################################## \n\nclass Distribution(nn.Module):\n\n\tdef log_prob(self, x):\n\n\t\traise NotImplementedError()\n\n\tdef sample(self, num_samples):\n\n\t\traise NotImplementedError()\n\n\tdef sample_with_log_prob(self,num_samples):\n\n\t\tsamples = self.sample(num_samples)\n\t\tlog_prob = self.log_prob(samples)\n\t\treturn samples, log_prob\n\n\tdef forward(self, *args, mode, **kwargs):\n\n\t\tif mode == 'log_prob':\n\t\t\treturn self.log_prob(*args,**kwargs)\n\t\telse:\n\t\t\traise RuntimeError(\"Mode {} not supported.\".format(mode))\n\n\nclass DiagonalNormal(Distribution):\n\n\tdef __init__(self, shape):\n\t\tsuper(DiagonalNormal, self).__init__()\n\t\tself.shape = torch.Size(shape)\n\t\tself.loc = nn.Parameter(torch.zeros(shape))\n\t\tself.log_scale = nn.Parameter(torch.zeros(shape))\n\n\tdef log_prob(self,x):\n\t\tlog_base = -0.5*np.log(2*np.pi) - self.log_scale\n\t\tlog_inner = -0.5*torch.exp(-2*self.log_scale)*((x-self.loc)**2)\n\t\treturn sum_except_batch(log_base + log_inner)\n\n\tdef sample(self, num_samples):\n\t\teps = torch.randn(num_samples, *self.shape, device=self.loc.device, dtype=self.loc.dtype)\n\t\treturn self.loc + self.log_scale.exp()*eps\n\nclass ConvNormal2d(DiagonalNormal):\n\tdef __init__(self, shape):\n\t\t# deliberately skips DiagonalNormal.__init__: loc/log_scale are re-registered below with conv-friendly shapes\n\t\tsuper(DiagonalNormal, self).__init__()\n\t\tassert len(shape) ==3\n\t\tself.shape = torch.Size(shape)\n\t\tself.loc = torch.nn.Parameter(torch.zeros(1,shape[0],1,1))\n\t\tself.log_scale = torch.nn.Parameter(torch.zeros(1,shape[0],1,1))\n\nclass ConditionalDistribution(Distribution):\n\n\tdef log_prob(self,x,context):\n\t\traise NotImplementedError()\n\tdef sample(self,context):\n\t\traise NotImplementedError()\n\tdef sample_with_log_prob(self, context):\n\t\traise NotImplementedError()\n\nclass ConditionalMeanNormal(ConditionalDistribution):\n \"\"\"A multivariate Normal with conditional mean and fixed std.\"\"\"\n\n def __init__(self, net, scale=1.0):\n super(ConditionalMeanNormal, self).__init__()\n self.net = net\n self.scale = scale\n\n def cond_dist(self, context):\n mean = self.net(context)\n return Normal(loc=mean, scale=self.scale)\n\n def log_prob(self, x, context):\n dist = self.cond_dist(context)\n return sum_except_batch(dist.log_prob(x))\n\n def sample(self, context):\n dist = self.cond_dist(context)\n return dist.rsample()\n\n def sample_with_log_prob(self, context):\n dist = self.cond_dist(context)\n z = dist.rsample()\n log_prob = dist.log_prob(z)\n log_prob = sum_except_batch(log_prob)\n return z, log_prob\n\n def mean(self, context):\n return self.cond_dist(context).mean\n\n\n
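# Minimal usage sketch (illustrative only; assumes `net` maps a context tensor\n# to a mean of the same shape as x, e.g. a small MLP):\n# p = ConditionalMeanNormal(net, scale=1.0)\n# lp = p.log_prob(x, context=c) # per-sample log-density, shape [batch]\n# x_hat = p.sample(context=c) # reparameterized draw\n\n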
class ConditionalMeanStdNormal(ConditionalDistribution):\n \"\"\"A multivariate Normal with conditional mean and learned std.\"\"\"\n\n def __init__(self, net, scale_shape):\n super(ConditionalMeanStdNormal, self).__init__()\n self.net = net\n self.log_scale = nn.Parameter(torch.zeros(scale_shape))\n\n def cond_dist(self, context):\n mean = self.net(context)\n return Normal(loc=mean, scale=self.log_scale.exp())\n\n def log_prob(self, x, context):\n dist = self.cond_dist(context)\n return sum_except_batch(dist.log_prob(x))\n\n def sample(self, context):\n dist = self.cond_dist(context)\n return dist.rsample()\n\n def sample_with_log_prob(self, context):\n dist = self.cond_dist(context)\n z = dist.rsample()\n log_prob = dist.log_prob(z)\n log_prob = sum_except_batch(log_prob)\n return z, log_prob\n\n def mean(self, context):\n return self.cond_dist(context).mean\n\n\nclass ConditionalNormal(ConditionalDistribution):\n \"\"\"A multivariate Normal with conditional mean and log_std.\"\"\"\n\n def __init__(self, net, split_dim=-1):\n super(ConditionalNormal, self).__init__()\n self.net = net\n self.split_dim = split_dim\n\n def cond_dist(self, context):\n params = self.net(context)\n mean, log_std = torch.chunk(params, chunks=2, dim=self.split_dim)\n return Normal(loc=mean, scale=log_std.exp())\n\n def log_prob(self, x, context):\n dist = self.cond_dist(context)\n return sum_except_batch(dist.log_prob(x))\n\n def sample(self, context):\n dist = self.cond_dist(context)\n return dist.rsample()\n\n def sample_with_log_prob(self, context):\n dist = self.cond_dist(context)\n z = dist.rsample()\n log_prob = dist.log_prob(z)\n log_prob = sum_except_batch(log_prob)\n return z, log_prob\n\n def mean(self, context):\n return self.cond_dist(context).mean\n\n def mean_stddev(self, context):\n dist = self.cond_dist(context)\n return dist.mean, dist.stddev\n\nclass StandardNormal(Distribution):\n\tdef __init__(self,shape):\n\t\tsuper(StandardNormal, self).__init__()\n\t\tself.shape = torch.Size(shape)\n\t\tself.register_buffer('buffer',torch.zeros(1))\n\n\tdef log_prob(self,x):\n\t\tlog_base = -0.5*np.log(2*np.pi)\n\t\tlog_inner = -0.5*x**2\n\t\treturn sum_except_batch(log_base+log_inner)\n\n\tdef sample(self, num_samples):\n\t\treturn torch.randn(num_samples,*self.shape, device=self.buffer.device, dtype=self.buffer.dtype)\n\nclass StandardUniform(Distribution):\n\tdef __init__(self, shape):\n\t\tsuper().__init__()\n\t\tself.shape = torch.Size(shape)\n\t\tself.register_buffer('zero',torch.zeros(1))\n\t\tself.register_buffer('one',torch.ones(1))\n\n\tdef log_prob(self,x):\n\t\tlb = mean_except_batch(x.ge(self.zero).type(self.zero.dtype))\n\t\tub = mean_except_batch(x.le(self.one).type(self.one.dtype))\n\t\treturn torch.log(lb*ub)\n\n\tdef sample(self, num_samples):\n\t\treturn torch.rand((num_samples,)+self.shape,device=self.zero.device, dtype=self.zero.dtype)\n\nclass ConditionalBernoulli(ConditionalDistribution):\n \"\"\"A Bernoulli distribution with conditional logits.\"\"\"\n\n def __init__(self, net):\n super(ConditionalBernoulli, self).__init__()\n self.net = net\n\n def cond_dist(self, context):\n logits = self.net(context)\n return Bernoulli(logits=logits)\n\n def log_prob(self, x, context):\n dist = self.cond_dist(context)\n return sum_except_batch(dist.log_prob(x.float()))\n\n def sample(self, context):\n dist = self.cond_dist(context)\n return dist.sample().long()\n\n def sample_with_log_prob(self, context):\n dist = self.cond_dist(context)\n z = dist.sample()\n log_prob = dist.log_prob(z)\n log_prob = sum_except_batch(log_prob)\n return z.long(), log_prob\n\n def logits(self, context):\n return self.cond_dist(context).logits\n\n def probs(self, context):\n return self.cond_dist(context).probs\n\n def mean(self, context):\n return self.cond_dist(context).mean\n\n def mode(self, context):\n return (self.cond_dist(context).logits>=0).long()\n\n\n\n########################################## 3 invertible transforms\n# \tbuild based on nn.Module\n#\tcontains popular image tensor flow transformation\n#\tserve as flow transforms\n# \tcomposition of flow transforms make a flow model\n########################################## \n\n
class Transform(nn.Module):\n\n\thas_inverse = True\n\n\t@property\n\tdef bijective(self):\n\t\traise NotImplementedError()\n\n\t@property\n\tdef stochastic_forward(self):\n\t\traise NotImplementedError()\n\n\t@property\n\tdef stochastic_inverse(self):\n\t\traise NotImplementedError()\n\t@property\n\tdef lower_bound(self):\n\t\treturn self.stochastic_forward\n\n\tdef forward(self,x):\n\t\traise NotImplementedError()\n\n\tdef inverse(self,z):\n\t\traise NotImplementedError()\n\n\nclass StochasticTransform(Transform):\n\n\thas_inverse = True\n\tbijective = False\n\tstochastic_forward = True\n\tstochastic_inverse = True\n\nclass Bijection(Transform):\n\n\tbijective = True\n\tstochastic_forward = False\n\tstochastic_inverse = False\n\tlower_bound = False\n\nclass Surjection(Transform):\n\n\tbijective = False\n\n\t@property\n\tdef stochastic_forward(self):\n\t\traise NotImplementedError()\n\n\t@property\n\tdef stochastic_inverse(self):\n\t\treturn not self.stochastic_forward\n\n\n\n\nclass VAE(StochasticTransform):\n\n\tdef __init__(self, decoder, encoder):\n\t\tsuper(VAE,self).__init__()\n\t\tself.decoder = decoder\n\t\tself.encoder = encoder\n\n\tdef forward(self,x):\n\t\tz, log_qz = self.encoder.sample_with_log_prob(context=x)\n\t\tlog_px = self.decoder.log_prob(x,context=z)\n\t\treturn z,log_px-log_qz\n\n\tdef inverse(self,z):\n\t\treturn self.decoder.sample(context=z)\n\n\n\nclass FlattenTransform(Transform):\n\n\t# class-level flags (previously dead local variables inside __init__)\n\thas_inverse = True\n\tbijective = True\n\tstochastic_forward = False\n\tstochastic_inverse = False\n\n\tdef __init__(self,in_shape):\n\t\tsuper(FlattenTransform,self).__init__()\n\t\tself.trans = Flatten()\n\t\tself.in_shape = in_shape\n\n\tdef forward(self,x):\n\t\treturn self.trans(x)\n\n\tdef inverse(self,x): \n\t\treturn x.view(self.in_shape)\n\n\n\nclass UniformDequantization(Surjection):\n\n\tstochastic_forward = True\n\n\tdef __init__(self, num_bits=8):\n\t\tsuper(UniformDequantization, self).__init__()\n\t\tself.num_bits = num_bits\n\t\tself.quantization_bins = 2**num_bits\n\t\tself.register_buffer('ldj_per_dim',-torch.log(torch.tensor(self.quantization_bins, dtype=torch.float)))\n\n\tdef _ldj(self, shape):\n\t\tbatch_size = shape[0]\n\t\tnum_dims = shape[1:].numel()\n\t\tldj = self.ldj_per_dim*num_dims\n\n\t\treturn ldj.repeat(batch_size)\n\n\n\tdef forward(self,x):\n\t\t# uniform (not Gaussian) noise, as the class name implies\n\t\tu = torch.rand(x.shape,device=self.ldj_per_dim.device,dtype=self.ldj_per_dim.dtype)\n\t\tz = (x.type(u.dtype) + u)/self.quantization_bins\n\t\tldj = self._ldj(z.shape)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\tz = self.quantization_bins*z\n\t\treturn z.floor().clamp(min=0,max=self.quantization_bins-1).long()\n\nclass QuantizationBijection(Bijection):\n\n\tdef forward(self, x):\n\t\tz = x/256\n\n\t\tbatch_size = x.shape[0]\n\n\t\tldj = x.shape[1:].numel()*torch.full([batch_size], math.log(1/256), device=x.device, dtype=x.dtype)\n\n\t\treturn z,ldj\n\n\tdef inverse(self, z):\n\n\t\tx = z*256\n\n\t\treturn x.float()\n\n\nclass LogisticBijection1d(Bijection):\n\n\tdef forward(self,x):\n\t\tz = torch.logit(x,eps=1e-7)\n\t\t_x = torch.clamp(x,1e-7,1-1e-7)\n\t\tldj = sum_except_batch(-torch.log(_x)-torch.log(1-_x))\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\t# keep everything as tensors on the input device so inverses compose in Flow.sample\n\t\tx = torch.sigmoid(z)\n\t\treturn x\n\n
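# Sketch: dequantize 8-bit images to [0,1) and invert (illustrative shapes):\n# deq = UniformDequantization(num_bits=8)\n# x = torch.randint(0, 256, (4, 3, 8, 8))\n# z, ldj = deq(x) # z in [0,1), ldj = -D*log(256) per sample\n# x_rec = deq.inverse(z) # recovers the original integer pixels\n\n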
class SwitchBijection1d(Bijection):\n\n\tdef forward(self,x):\n\t\ta,b = torch.chunk(x,2,1)\n\t\tz = torch.cat([b,a],dim=1)\n\t\tldj = torch.zeros((x.shape[0],), device=x.device, dtype=x.dtype)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\ta,b = torch.chunk(z,2,1)\n\t\tx = torch.cat([b,a],dim=1)\n\t\treturn x\n\n\nclass CouplingBijection(Bijection):\n\n\tdef __init__(self, coupling_net, split_dim=1, num_condition=None):\n\t\tsuper(CouplingBijection,self).__init__()\n\t\tassert split_dim >=1\n\t\tself.coupling_net = coupling_net\n\t\tself.split_dim = split_dim\n\t\tself.num_condition = num_condition\n\n\tdef split_input(self, input):\n\t\tif self.num_condition:\n\t\t\tsplit_proportions = (self.num_condition, input.shape[self.split_dim]-self.num_condition)\n\t\t\treturn torch.split(input, split_proportions, dim=self.split_dim)\n\t\telse:\n\t\t\treturn torch.chunk(input, 2, dim=self.split_dim)\n\n\tdef forward(self,x):\n\n\t\tid,x2 = self.split_input(x)\n\t\telementwise_params = self.coupling_net(id)\n\t\tz2,ldj = self._elementwise_forward(x2, elementwise_params)\n\t\tz = torch.cat([id,z2],dim=self.split_dim)\n\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\twith torch.no_grad():\n\t\t\tid,z2 = self.split_input(z)\n\t\t\telementwise_params = self.coupling_net(id)\n\t\t\tx2 = self._elementwise_inverse(z2,elementwise_params)\n\t\t\tx = torch.cat([id,x2],dim=self.split_dim)\n\t\treturn x\n\n\tdef _output_dim_multiplier(self):\n\t\traise NotImplementedError()\n\n\tdef _elementwise_forward(self,x,elementwise_params):\n\t\traise NotImplementedError()\n\n\tdef _elementwise_inverse(self,z,elementwise_params):\n\t\traise NotImplementedError()\n\n\nclass AdditiveCouplingBijection(CouplingBijection):\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 1\n\n\tdef _elementwise_forward(self,x,elementwise_params):\n\t\treturn x + elementwise_params, torch.zeros(x.shape[0],device=x.device, dtype=x.dtype)\n\n\tdef _elementwise_inverse(self,z,elementwise_params):\n\t\treturn z - elementwise_params\n\nclass AffineCouplingBijection(CouplingBijection):\n\n\tdef __init__(self, coupling_net, split_dim=1, num_condition=None, scale_fn=lambda s:torch.exp(s)):\n\t\tsuper(AffineCouplingBijection,self).__init__(coupling_net=coupling_net, split_dim=split_dim,num_condition=num_condition)\n\t\tassert callable(scale_fn)\n\t\tself.scale_fn = scale_fn\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 2\n\n\tdef _elementwise_forward(self,x, elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale, shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tz = scale*x + shift\n\t\tldj = sum_except_batch(torch.log(scale))\n\t\treturn z,ldj\n\n\tdef _elementwise_inverse(self,z,elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale, shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tx = (z-shift)/scale\n\t\treturn x\n\n\tdef _unconstrained_scale_and_shift(self, elementwise_params):\n\t\tunconstrained_scale = elementwise_params[..., 0]\n\t\tshift = elementwise_params[...,1]\n\t\treturn unconstrained_scale, shift\n\n\n
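# Sketch (illustrative): `net` is assumed to map the conditioning half to\n# elementwise params of shape [..., D/2, 2], e.g. ending in ElementwiseParams(2):\n# bij = AffineCouplingBijection(coupling_net=net)\n# z, ldj = bij(x)\n# x_rec = bij.inverse(z) # exact inverse, no iteration needed\n\n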
class AffineCouplingBijection1d(CouplingBijection):\n\n\tdef __init__(self, coupling_net, split_dim=1, num_condition=None, scale_fn=lambda s:torch.exp(s),split_type='half'):\n\t\tsuper(AffineCouplingBijection1d,self).__init__(coupling_net=coupling_net, split_dim=split_dim,num_condition=num_condition)\n\t\tassert callable(scale_fn)\n\t\tself.scale_fn = scale_fn\n\n\t\tinput_dim = self.coupling_net.input_size\n\t\tall_dim = self.coupling_net.output_size\n\n\t\tif split_type == 'half':\n\t\t\t# assumption: condition on the first input_dim dims and transform the rest\n\t\t\t# (the original code referenced an undefined `coupling_index` here)\n\t\t\tself.coupling_index = np.arange(input_dim)\n\t\t\tself.no_coupling_index = np.arange(input_dim, all_dim)\n\t\telif split_type == 'random':\n\t\t\tself.coupling_index = np.sort(np.random.choice(np.arange(all_dim),input_dim,replace=False))\n\t\t\tself.no_coupling_index = []\n\t\t\tfor i in np.arange(all_dim):\n\t\t\t\tif i not in self.coupling_index:\n\t\t\t\t\tself.no_coupling_index.append(i)\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 2\n\n\tdef split_input(self, input):\n\t\tif self.num_condition:\n\t\t\tsplit_proportions = (self.num_condition, input.shape[self.split_dim]-self.num_condition)\n\t\t\treturn torch.split(input, split_proportions, dim=self.split_dim)\n\t\telse:\n\t\t\tif self.coupling_index is not None:\n\t\t\t\tid = input[:,self.coupling_index]\n\t\t\t\tx2 = input[:,self.no_coupling_index]\n\t\t\t\treturn id,x2\n\t\t\telse:\n\t\t\t\treturn torch.chunk(input, 2, dim=self.split_dim)\n\n\tdef forward(self,x):\n\n\t\tid,x2 = self.split_input(x)\n\t\telementwise_params = self.coupling_net(id)\n\t\tz2,ldj = self._elementwise_forward(x2, elementwise_params)\n\t\tz = torch.zeros(x.shape, device=x.device, dtype=x.dtype)\n\t\tz[:,self.coupling_index] = id\n\t\tz[:,self.no_coupling_index] = z2\n\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\twith torch.no_grad():\n\t\t\tid,z2 = self.split_input(z)\n\t\t\telementwise_params = self.coupling_net(id)\n\t\t\tx2 = self._elementwise_inverse(z2,elementwise_params)\n\t\t\tx = torch.zeros(z.shape, device=z.device, dtype=z.dtype)\n\t\t\tx[:,self.coupling_index] = id\n\t\t\tx[:,self.no_coupling_index] = x2\n\n\t\treturn x\n\n\tdef _elementwise_forward(self,x, elementwise_params):\n\t\t# assert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale, shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tunconstrained_scale = torch.clamp(unconstrained_scale,-2,2)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\n\t\tz = scale*x + shift\n\t\tldj = torch.sum(unconstrained_scale,dim=1)\n\t\treturn z,ldj\n\n\tdef _elementwise_inverse(self,z,elementwise_params):\n\t\t# assert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale, shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tunconstrained_scale = torch.clamp(unconstrained_scale,-2,2)\n\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tx = (z-shift)/scale\n\t\treturn x\n\n\tdef _unconstrained_scale_and_shift(self, elementwise_params):\n\t\tunconstrained_scale,shift = torch.chunk(elementwise_params,2,self.split_dim)\n\t\treturn unconstrained_scale, shift\n\n\n\n\nclass AutoregressiveBijection(Bijection):\n\n\tdef __init__(self, autoregressive_net, autoregressive_order='ltr'):\n\t\tsuper(AutoregressiveBijection, self).__init__()\n\t\tassert isinstance(autoregressive_order,str) or isinstance(autoregressive_order,Iterable)\n\t\tassert autoregressive_order in {'ltr'}\n\n\t\tself.autoregressive_net = autoregressive_net\n\t\tself.autoregressive_order = autoregressive_order\n\n\tdef forward(self,x):\n\t\telementwise_params = self.autoregressive_net(x)\n\t\tz, ldj = self._elementwise_forward(x, elementwise_params)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\twith torch.no_grad():\n\t\t\tif self.autoregressive_order == 'ltr': return self._inverse_ltr(z)\n\n\tdef _inverse_ltr(self,z):\n\t\tx = torch.zeros_like(z)\n\t\tfor d in range(x.shape[1]):\n\t\t\telementwise_params = self.autoregressive_net(x)\n\t\t\tx[:,d] = self._elementwise_inverse(z[:,d],elementwise_params[:,d])\n\n\t\treturn x\n\n\tdef _output_dim_multiplier(self):\n\t\traise 
NotImplementedError()\n\n\tdef _elementwise_forward(self, x, elementwise_params):\n\t\traise NotImplementedError()\n\n\tdef _elementwise_inverse(self, z, elementwise_params):\n\t\traise NotImplementedError()\n\n\nclass AdditiveAutoregressiveBijection(AutoregressiveBijection):\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 1\n\n\tdef _elementwise_forward(self, x, elementwise_params):\n\t\treturn x + elementwise_params, torch.zeros(x.shape[0], device=x.device, dtype=x.dtype)\n\n\tdef _elementwise_inverse(self,z, elementwise_params):\n\t\treturn z - elementwise_params\n\n\n\nclass AffineAutoregressiveBijection(AutoregressiveBijection):\n\n\tdef __init__(self, autoregressive_net, autoregressive_order='ltr', scale_fn=lambda s:torch.exp(s)):\n\t\tsuper(AffineAutoregressiveBijection, self).__init__(autoregressive_net=autoregressive_net,autoregressive_order=autoregressive_order)\n\t\tassert callable(scale_fn)\n\t\tself.scale_fn = scale_fn\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 2\n\n\tdef _elementwise_forward(self,x, elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale, shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tz = scale*x + shift\n\n\t\tldj = sum_except_batch(torch.log(scale))\n\n\t\treturn z,ldj\n\n\tdef _elementwise_inverse(self, z, elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale,shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tx = (z-shift)/scale\n\t\treturn x\n\n\tdef _unconstrained_scale_and_shift(self, elementwise_params):\n\n\t\tunconstrained_scale = elementwise_params[...,0]\n\t\tshift = elementwise_params[...,1]\n\t\treturn unconstrained_scale,shift\n\n\nclass AffineAutoregressiveBijection1d(AutoregressiveBijection):\n\n\tdef __init__(self, autoregressive_net, autoregressive_order='ltr', scale_fn=lambda s:torch.exp(s)):\n\t\tsuper(AffineAutoregressiveBijection1d, self).__init__(autoregressive_net=autoregressive_net,autoregressive_order=autoregressive_order)\n\t\tassert callable(scale_fn)\n\t\tself.scale_fn = scale_fn\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 2\n\n\tdef _elementwise_forward(self,x, elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale, shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tz = scale*x + shift\n\n\t\tldj = sum_except_batch(torch.log(scale))\n\n\t\treturn z,ldj\n\n\tdef _elementwise_inverse(self, z, elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale,shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tx = (z-shift)/scale\n\t\treturn x\n\n\tdef _unconstrained_scale_and_shift(self, elementwise_params):\n\n\t\tunconstrained_scale = elementwise_params[...,0]\n\t\tshift = elementwise_params[...,1]\n\t\treturn unconstrained_scale,shift\n\n\n# net = MADE_Old(features=3072, num_params=2, hidden_features=[4], random_order=False, random_mask=False,\n# \t\trandom_seed=None, activation='relu',dropout_prob=0.0,batch_norm=False)\n\n# layer = AffineAutoregressiveBijection(net)\n\n\nclass AutoregressiveBijection2d(Bijection):\n\n\tdef __init__(self, autoregressive_net, 
autoregressive_order='raster_cwh'):\n\t\tsuper(AutoregressiveBijection2d,self).__init__()\n\t\tassert isinstance(autoregressive_order,str) or isinstance(autoregressive_order, Iterable)\n\t\tassert autoregressive_order in {'raster_cwh','raster_wh'}\n\t\tself.autoregressive_net = autoregressive_net\n\t\tself.autoregressive_order = autoregressive_order\n\n\tdef forward(self,x):\n\t\telementwise_params = self.autoregressive_net(x)\n\t\tz,ldj = self._elementwise_forward(x,elementwise_params)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\twith torch.no_grad():\n\t\t\tif self.autoregressive_order == 'raster_cwh': return self._inverse_raster_cwh(z)\n\t\t\tif self.autoregressive_order == 'raster_wh': return self._inverse_raster_wh(z)\n\n\tdef _inverse_raster_cwh(self,z):\n\t\tx = torch.zeros_like(z)\n\t\tfor h in range(x.shape[2]):\n\t\t\tfor w in range(x.shape[3]):\n\t\t\t\tfor c in range(x.shape[1]):\n\t\t\t\t\telementwise_params = self.autoregressive_net(x)\n\t\t\t\t\tx[:,c,h,w] = self._elementwise_inverse(z[:,c,h,w], elementwise_params[:,c,h,w])\n\n\t\treturn x\n\n\tdef _inverse_raster_wh(self,z):\n\t\tx = torch.zeros_like(z)\n\t\tfor h in range(x.shape[2]):\n\t\t\tfor w in range(x.shape[3]):\n\t\t\t\telementwise_params = self.autoregressive_net(x)\n\t\t\t\tx[:,:,h,w] = self._elementwise_inverse(z[:,:,h,w], elementwise_params[:,:,h,w])\n\t\treturn x\n\n\tdef _output_dim_multiplier(self):\n\t\traise NotImplementedError()\n\n\tdef _elementwise_forward(self,x,elementwise_params):\n\t\traise NotImplementedError()\n\n\tdef _elementwise_inverse(self,z,elementwise_params):\n\t\traise NotImplementedError()\n\n\nclass AdditiveAutoregressiveBijection2d(AutoregressiveBijection2d):\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 1\n\n\tdef _elementwise_forward(self, x, elementwise_params):\n\t\treturn x + elementwise_params, torch.zeros(x.shape[0],device=x.device, dtype=x.dtype)\n\n\tdef _elementwise_inverse(self, z, elementwise_params):\n\t\treturn z - elementwise_params\n\n\nclass AffineAutoregressiveBijection2d(AutoregressiveBijection2d):\n\n\tdef __init__(self, autoregressive_net, autoregressive_order='raster_cwh',scale_fn=lambda s:torch.exp(s)):\n\t\tsuper(AffineAutoregressiveBijection2d,self).__init__(autoregressive_net=autoregressive_net,autoregressive_order=autoregressive_order)\n\t\tassert callable(scale_fn)\n\t\tself.scale_fn = scale_fn\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 2\n\n\tdef _elementwise_forward(self, x, elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale,shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tz = scale*x + shift\n\t\tldj = sum_except_batch(torch.log(scale))\n\n\t\treturn z,ldj\n\n\tdef _elementwise_inverse(self, z, elementwise_params):\n\t\tassert elementwise_params.shape[-1] == self._output_dim_multiplier()\n\t\tunconstrained_scale,shift = self._unconstrained_scale_and_shift(elementwise_params)\n\t\tscale = self.scale_fn(unconstrained_scale)\n\t\tx = (z-shift)/scale\n\t\treturn x\n\n\tdef _unconstrained_scale_and_shift(self, elementwise_params):\n\t\tunconstrained_scale = elementwise_params[...,0]\n\t\tshift = elementwise_params[...,1]\n\t\treturn unconstrained_scale,shift\n\n\n
class ResidualBijection(Bijection):\n\n\tdef __init__(self, residual_net,approx_trace_order, n_inverse_iters, approx_trace_method='precise'):\n\t\tsuper(ResidualBijection,self).__init__()\n\t\tassert isinstance(approx_trace_method,str) or isinstance(approx_trace_method, Iterable)\n\t\tassert approx_trace_method in {'russia_rollet','truncation','precise'}\n\n\t\tself.residual_net = residual_net\n\t\tself.approx_trace_method = approx_trace_method\n\n\t\tif self.approx_trace_method == 'russia_rollet':\n\t\t\tassert approx_trace_order is not None, 'russia_rollet trace approximation requires approx_trace_order'\n\n\t\tself.approx_trace_order = approx_trace_order\n\t\tself.n_inverse_iters = n_inverse_iters\n\n\tdef forward(self,x):\n\t\tz, ldj = self._elementwise_forward(x)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\twith torch.no_grad():\n\t\t\treturn self._elementwise_inverse(z)\n\n\tdef _output_dim_multiplier(self):\n\t\traise NotImplementedError()\n\n\tdef _elementwise_forward(self, x, elementwise_params):\n\t\traise NotImplementedError()\n\n\tdef _elementwise_inverse(self, z, elementwise_params):\n\t\traise NotImplementedError()\n\n\n
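# Sketch for the 1d subclass defined below (illustrative; assumes an MLP-style\n# residual net with matching input/output size, as in the commented experiments\n# further down):\n# res_net = MLP(4, 4, hidden_units=[64], activation='relu')\n# bij = ResidualBijection1d(res_net, input_size=4, n_inverse_iters=20)\n# z, ldj = bij(x) # exact log-det via per-sample autograd jacobians (O(D^3))\n# x_rec = bij.inverse(z) # fixed-point iteration x <- z - g(x)\n\n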
class ResidualBijection1d(ResidualBijection):\n\n\tdef __init__(self, residual_net,input_size, approx_trace_order=10,n_inverse_iters=10,approx_trace_method='precise'):\n\t\tsuper(ResidualBijection1d, self).__init__(residual_net=residual_net,\n\t\t\tapprox_trace_method=approx_trace_method,\n\t\t\tapprox_trace_order=approx_trace_order,\n\t\t\tn_inverse_iters=n_inverse_iters)\n\n\t\tself.input_size = input_size\n\t\tself.get_z = lambda x: x+self.residual_net(x)\n\n\tdef _output_dim_multiplier(self):\n\t\treturn 1\n\n\tdef _elementwise_forward(self, x):\n\n\t\tjacobs = []\n\n\t\tfor b_idx in range(x.shape[0]):\n\n\t\t\txx = x[b_idx].unsqueeze(0).detach()\n\t\t\t# xx = torch.zeros_like(x[b_idx].unsqueeze(0)).to(x.device)\n\n\t\t\t# xx.data = x[b_idx].unsqueeze(0).data\n\n\t\t\tjacob = torch.autograd.functional.jacobian(self.get_z,xx,create_graph=True).squeeze().unsqueeze(0)\n\n\t\t\tjacobs.append(jacob)\n\n\t\tldj = torch.logdet(torch.cat(jacobs,dim=0))\n\n\t\tz = x + self.residual_net(x)\n\n\t\treturn z,ldj\n\n\tdef _elementwise_inverse(self, z):\n\n\t\txx = z\n\n\t\tfor _ in range(self.n_inverse_iters):\n\n\t\t\txx = z - self.residual_net(xx)\n\n\t\treturn xx\n\n\n\n\n\n# def get_z(x):\n\n# \treturn x + residual_net(x)\n\n\n# residual_net = MLP(int(3072), 3072,hidden_units=[100,100],\n# activation='relu',\n# in_lambda=None).to(x.device)\n\n# ldj = torch.zeros(x.shape[0]).to(x.device)\n\n# get_z = lambda x: x + residual_net(x)\n\n# jacobs = []\n\n# for b_idx in range(x.shape[0]):\n\n# \txx = torch.zeros_like(x[b_idx].unsqueeze(0)).to(x.device)\n\n# \txx.data = x[b_idx].unsqueeze(0).data\n\n# \txx.requires_grad = True\n\n# \tjacob = torch.autograd.functional.jacobian(get_z,xx,create_graph=True).squeeze().unsqueeze(0)\n\n# \tjacobs.append(jacob)\n\n# \t# ldj[b_idx] = torch.logdet(jacob)\n\n# torch.logdet(torch.cat(jacobs,dim=0))\n\n# z = x + residual_net(x)\n\n# jacob = torch.autograd.functional.jacobian(get_z,xx,create_graph=True).reshape(x.shape[1],x.shape[1])\n\n\n# torch.autograd.functional.jacobian(get_z,x,create_graph=True).reshape(x.shape[1],x.shape[1])\n\n\n# xx = z\n\n# for _ in range(10):\n\n# \txx = z - residual_net(xx)\n\n\n\n\n# residual_net = MLP(int(3072), 3072,hidden_units=[100,100],\n# activation='relu',\n# in_lambda=None).to(x.device)\n\n\n\n\n\n\n\n\n# A_dim = A.shape[0]\n# B = torch.eye(A_dim)\n# C = torch.zeros_like(A)\n# ind = 1\n# for k in range(approx_trace_order):\n# \tB = ind*B@A/(k+1)\n# \tind = -1*ind\n# \tC = C + B\n\n\n\n\n\n\n\nclass _ActNormBijection(Bijection):\n\n\tdef __init__(self, num_features, data_dep_init=True, eps=1e-6):\n\t\tsuper(_ActNormBijection,self).__init__()\n\t\tself.num_features = num_features\n\t\tself.data_dep_init = data_dep_init\n\t\tself.eps = eps\n\n\t\tself.register_buffer('initialized',torch.zeros(1) if data_dep_init else torch.ones(1))\n\t\tself.register_params()\n\n\tdef data_init(self,x):\n\t\tself.initialized += 1.\n\t\twith torch.no_grad():\n\t\t\tx_mean, x_std = self.compute_stats(x)\n\t\t\tself.shift.data = x_mean\n\t\t\tself.log_scale.data = torch.log(x_std + self.eps)\n\n\tdef forward(self,x):\n\t\tif self.training and not self.initialized: self.data_init(x)\n\t\tz = (x - self.shift)*torch.exp(-self.log_scale)\n\t\tldj = torch.sum(-self.log_scale).expand([x.shape[0]])*self.ldj_multiplier(x)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\treturn self.shift + z*torch.exp(self.log_scale)\n\n\tdef register_params(self):\n\t\traise NotImplementedError()\n\n\tdef compute_stats(self,x):\n\t\traise NotImplementedError()\n\n\tdef ldj_multiplier(self,x):\n\t\traise NotImplementedError()\n\n\n
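# Sketch: ActNorm initializes shift/log_scale from the first training batch\n# (data-dependent init), then acts as a plain learned affine bijection:\n# actnorm = ActNormBijection(num_features=10)\n# actnorm.train()\n# z, ldj = actnorm(x) # first call sets the parameters from batch statistics\n\n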
class ActNormBijection(_ActNormBijection):\n\n\tdef register_params(self):\n\n\t\tself.register_parameter('shift',nn.Parameter(torch.zeros(1,self.num_features)))\n\t\tself.register_parameter('log_scale',nn.Parameter(torch.zeros(1,self.num_features)))\n\n\tdef compute_stats(self,x):\n\n\t\tx_mean = torch.mean(x, dim=0, keepdim=True)\n\t\tx_std = torch.std(x, dim=0, keepdim=True)\n\n\t\treturn x_mean, x_std\n\n\tdef ldj_multiplier(self,x):\n\n\t\treturn 1\n\nclass ActNormBijection1d(_ActNormBijection):\n\n\tdef register_params(self):\n\t\tself.register_parameter('shift', nn.Parameter(torch.zeros(1,self.num_features,1)))\n\t\tself.register_parameter('log_scale', nn.Parameter(torch.zeros(1, self.num_features,1)))\n\n\tdef compute_stats(self,x):\n\n\t\tx_mean = torch.mean(x, dim=[0,2], keepdim=True)\n\t\tx_std = torch.std(x, dim=[0,2], keepdim=True)\n\n\t\treturn x_mean, x_std\n\n\tdef ldj_multiplier(self,x):\n\t\treturn x.shape[2]\n\n\nclass ActNormBijection2d(_ActNormBijection):\n\n\tdef register_params(self):\n\n\t\tself.register_parameter('shift',nn.Parameter(torch.zeros(1,self.num_features,1,1)))\n\t\tself.register_parameter('log_scale',nn.Parameter(torch.zeros(1,self.num_features, 1,1)))\n\n\tdef compute_stats(self,x):\n\n\t\tx_mean = torch.mean(x,dim=[0,2,3],keepdim=True)\n\t\tx_std = torch.std(x,dim=[0,2,3],keepdim=True)\n\n\t\treturn x_mean, x_std\n\n\tdef ldj_multiplier(self,x):\n\t\treturn x.shape[2:4].numel()\n\n\n\nclass Conv1x1(Bijection):\n\n\tdef __init__(self, num_channels, orthogonal_init=True, slogdet_cpu=True):\n\t\tsuper(Conv1x1, self).__init__()\n\n\t\tself.num_channels = num_channels\n\t\tself.slogdet_cpu = slogdet_cpu\n\t\tself.weight = nn.Parameter(torch.Tensor(num_channels,num_channels))\n\t\tself.reset_parameters(orthogonal_init)\n\n\tdef reset_parameters(self, orthogonal_init):\n\n\t\tself.orthogonal_init = orthogonal_init\n\n\t\tif self.orthogonal_init:\n\t\t\tnn.init.orthogonal_(self.weight)\n\t\telse:\n\t\t\tbound = 1.0/ np.sqrt(self.num_channels)\n\t\t\tnn.init.uniform_(self.weight, -bound, bound)\n\n\tdef _conv(self,weight, v):\n\t\t_,channel, *features = v.shape\n\t\tn_feature_dims = len(features)\n\n\t\tfill = (1,)*n_feature_dims\n\t\tweight = weight.view(channel, channel, *fill)\n\n\t\tif n_feature_dims == 1:\n\t\t\treturn F.conv1d(v,weight)\n\t\telif n_feature_dims == 2:\n\t\t\treturn F.conv2d(v,weight)\n\t\telif n_feature_dims == 3:\n\t\t\treturn F.conv3d(v,weight)\n\t\telse:\n\t\t\traise ValueError(f'Got {n_feature_dims}d tensor, expected 1d, 2d, or 3d')\n\n\tdef _logdet(self, x_shape):\n\t\tb,c,*dims = x_shape\n\t\tif self.slogdet_cpu:\n\t\t\t_, ldj_per_pixel = torch.slogdet(self.weight.to('cpu'))\n\t\telse:\n\t\t\t_,ldj_per_pixel = torch.slogdet(self.weight)\n\t\tldj = ldj_per_pixel * reduce(mul, dims)\n\t\treturn ldj.expand([b]).to(self.weight.device)\n\n\tdef forward(self,x):\n\t\tz = self._conv(self.weight,x)\n\t\tldj = self._logdet(x.shape)\n\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\tweight_inv = torch.inverse(self.weight)\n\t\tx = self._conv(weight_inv, z)\n\t\treturn x\n\n\n\n\n\nclass ScalarAffineBijection(Bijection):\n\n\tdef __init__(self, shift=None, scale=None):\n\t\tsuper(ScalarAffineBijection, self).__init__()\n\t\tassert isinstance(shift, float) or shift is None, 'shift must be a float or None'\n\t\tassert isinstance(scale, float) or scale is None, 'scale must be a float or None'\n\n\t\tif shift is None and scale is None:\n\t\t\traise ValueError('At least one of scale and shift must be provided.')\n\t\tif scale == 0:\n\t\t\traise 
ValueError('Scale can not be zero.')\n\n\t\tself.register_buffer('_shift',torch.tensor(shift if (shift is not None) else 0.))\n\t\tself.register_buffer('_scale',torch.tensor(scale if (scale is not None) else 1.))\n\n\t@property \n\tdef _log_scale(self):\n\t\treturn torch.log(torch.abs(self._scale))\n\n\tdef forward(self, x):\n\t\tbatch_size = x.shape[0]\n\t\tnum_dims = x.shape[1:].numel()\n\t\tz = x*self._scale + self._shift\n\t\tldj = torch.full([batch_size], self._log_scale*num_dims, device=x.device, dtype=x.dtype)\n\n\t\treturn z, ldj\n\n\tdef inverse(self,z):\n\t\tbatch_size = z.shape[0]\n\t\tnum_dims = z.shape[1:].numel()\n\t\tx = (z - self._shift)/self._scale\n\n\t\treturn x\n\nclass Permute(Bijection):\n\n\tdef __init__(self, permutation, dim=1):\n\t\tsuper(Permute, self).__init__()\n\t\tassert isinstance(dim, int), 'dim must be an integer'\n\t\tassert dim >= 1, 'dim must be >= 1 (0 corresponding to batch dimension)'\n\t\tassert isinstance(permutation, torch.Tensor) or isinstance(permutation, Iterable), 'permutation must be a torch.Tensor or Iterable'\n\t\tif isinstance(permutation, torch.Tensor):\n\t\t\tassert permutation.ndimension() == 1, 'permutation must be an 1D tensor, but was of shape {}'.format(permutation.shape)\n\t\telse:\n\t\t\tpermutation = torch.tensor(permutation)\n\n\t\tself.dim = dim\n\t\tself.register_buffer('permutation',permutation)\n\n\n\t@property\n\tdef inverse_permutation(self):\n\t\treturn torch.argsort(self.permutation)\n\tdef forward(self,x):\n\t\treturn torch.index_select(x, self.dim, self.permutation), torch.zeros(x.shape[0],device=x.device, dtype=x.dtype)\n\n\tdef inverse(self,z):\n\t\treturn torch.index_select(z, self.dim, self.inverse_permutation)\n\n\nclass Shuffle(Permute):\n\n\tdef __init__(self, dim_size, dim=1):\n\t\tsuper(Shuffle, self).__init__(torch.randperm(dim_size),dim)\n\nclass Reverse(Permute):\n\n\tdef __init__(self, dim_size, dim=1):\n\t\tsuper(Reverse, self).__init__(torch.arange(dim_size-1, -1,-1),dim)\n\n\nclass PermuteAxes(Bijection):\n\n\tdef __init__(self, permutation):\n\t\tsuper(PermuteAxes, self).__init__()\n\t\tassert isinstance(permutation, Iterable), 'permutation must be an Iterable'\n\t\tassert permutation[0] == 0, 'First element of permutation must be 0 (such that batch dimension stays intact)'\n\n\t\tself.permutation = permutation\n\t\tself.inverse_permutation = torch.argsort(torch.tensor(self.permutation)).tolist()\n\n\tdef forward(self, x):\n\t\tz = x.permute(self.permutation).contiguous()\n\t\tldj = torch.zeros((x.shape[0],),device=x.device, dtype=x.dtype)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\tx = z.permute(self.inverse_permutation).contiguous()\n\t\treturn x\n\nclass StochasticPermutation(StochasticTransform):\n\n\tdef __init__(self, dim=1):\n\t\tsuper(StochasticPermutation, self).__init__()\n\t\tself.register_buffer('buffer',torch.zeros(1))\n\t\tself.dim = dim\n\n\tdef forward(self,x):\n\t\trand = torch.rand(x.shape[0], x.shape[self.dim], device=x.device)\n\t\tpermutation = rand.argsort(dim=1)\n\n\t\tfor d in range(1, self.dim):\n\t\t\tpermutation = permutation.unsqueeze(1)\n\n\t\tfor d in range(self.dim +1, x.dim()):\n\t\t\tpermutation = permutation.unsqueeze(-1)\n\n\t\tpermutation = permutation.expand_as(x)\n\t\tz = torch.gather(x, self.dim, permutation)\n\t\tldj = self.buffer.new_zeros(x.shape[0])\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\trand = torch.rand(z.shape[0], z.shape[self.dim], device=z.device)\n\t\tpermutation = rand.argsort(dim=1)\n\t\tfor d in range(1, self.dim):\n\t\t\tpermutation = 
permutation.unsqueeze(1)\n\t\tfor d in range(self.dim+1, z.dim()):\n\t\t\tpermutation = permutation.unsqueeze(-1)\n\t\tpermutation = permutation.expand_as(z)\n\t\tx = torch.gather(z, self.dim, permutation)\n\n\t\treturn x\n\n\n\nclass Reshape(Bijection):\n\n\tdef __init__(self, input_shape, output_shape):\n\t\tsuper(Reshape, self).__init__()\n\t\tself.input_shape = torch.Size(input_shape)\n\t\tself.output_shape = torch.Size(output_shape)\n\t\tassert self.input_shape.numel() == self.output_shape.numel()\n\n\tdef forward(self,x):\n\t\tbatch_size = (x.shape[0],)\n\t\tz = x.reshape(batch_size + self.output_shape)\n\t\tldj = torch.zeros(batch_size, device=x.device, dtype=x.dtype)\n\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\tbatch_size = (z.shape[0],)\n\t\tx = z.reshape(batch_size + self.input_shape)\n\t\treturn x\n\n\nclass Rotate(Bijection):\n\n\tdef __init__(self, degrees, dim1, dim2):\n\n\t\tsuper(Rotate, self).__init__()\n\t\tassert isinstance(degrees, int), 'degrees must be an integer'\n\t\tassert isinstance(dim1, int), 'dim1 must be an integer'\n\t\tassert isinstance(dim2, int), 'dim2 must be an integer'\n\n\t\tassert degrees in {90,180,270}\n\t\tassert dim1 !=0\n\t\tassert dim2 != 0\n\t\tassert dim1 != dim2\n\n\t\tself.degrees = degrees\n\t\tself.dim1 = dim1\n\t\tself.dim2 = dim2\n\n\tdef _rotate90(self,x):\n\n\t\treturn x.transpose(self.dim1, self.dim2).flip(self.dim1)\n\n\tdef _rotate90_inv(self,z):\n\t\treturn z.flip(self.dim1).transpose(self.dim1,self.dim2)\n\n\tdef _rotate180(self,x):\n\t\treturn x.flip(self.dim1).flip(self.dim2)\n\n\tdef _rotate180_inv(self,z):\n\t\treturn z.flip(self.dim2).flip(self.dim1)\n\n\tdef _rotate270(self,x):\n\t\treturn x.transpose(self.dim1, self.dim2).flip(self.dim2)\n\n\tdef _rotate270_inv(self,z):\n\t\treturn z.flip(self.dim2).transpose(self.dim1,self.dim2)\n\n\tdef forward(self,x):\n\n\t\tif self.degrees == 90: z = self._rotate90(x)\n\t\telif self.degrees == 180: z = self._rotate180(x)\n\t\telif self.degrees == 270: z = self._rotate270(x)\n\n\t\tldj = torch.zeros(x.shape[0], device=x.device, dtype=x.dtype)\n\n\t\treturn z, ldj\n\n\tdef inverse(self,z):\n\t\tif self.degrees == 90: x = self._rotate90_inv(z)\n\t\telif self.degrees == 180: x = self._rotate180_inv(z)\n\t\telif self.degrees == 270: x = self._rotate270_inv(z)\n\t\treturn x\n\n\n\n\n\n\n\n\nclass Augment(Surjection):\n\n\tstochastic_forward = True\n\n\tdef __init__(self, encoder, x_size, split_dim=1):\n\t\tsuper(Augment, self).__init__()\n\t\tassert split_dim >= 1\n\t\tself.encoder = encoder\n\t\tself.split_dim = split_dim\n\t\tself.x_size = x_size\n\t\tself.cond = isinstance(self.encoder, ConditionalDistribution)\n\n\tdef split_z(self,z):\n\t\tsplit_proportions = (self.x_size, z.shape[self.split_dim]-self.x_size)\n\t\treturn torch.split(z, split_proportions, dim=self.split_dim)\n\n\tdef forward(self,x):\n\t\tif self.cond: z2, logqz2 = self.encoder.sample_with_log_prob(context=x)\n\t\telse: z2,logqz2=self.encoder.sample_with_log_prob(num_samples=x.shape[0])\n\n\t\tz = torch.cat([x,z2],dim=self.split_dim)\n\t\tldj = -logqz2\n\t\treturn z,ldj \n\n\tdef inverse(self, z):\n\t\tx, z2 = self.split_z(z)\n\t\treturn x\n\n\n
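# Sketch (illustrative): Augment pads x with extra dimensions drawn from an\n# encoder distribution; `enc` is assumed to be a ConditionalDistribution (or a\n# plain Distribution) over the extra dims:\n# aug = Augment(encoder=enc, x_size=2)\n# z, ldj = aug(x) # z = [x, z2], ldj = -log q(z2 | x)\n\n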
class Slice(Surjection):\n\n\tstochastic_forward = False\n\n\tdef __init__(self, decoder, num_keep, dim=1):\n\t\tsuper(Slice, self).__init__()\n\t\tassert dim >= 1\n\t\tself.decoder = decoder\n\t\tself.dim = dim\n\t\tself.num_keep = num_keep\n\t\tself.cond = isinstance(self.decoder, ConditionalDistribution)\n\n\tdef split_input(self, input):\n\t\tsplit_proportions = (self.num_keep, input.shape[self.dim]-self.num_keep)\n\t\treturn torch.split(input, split_proportions, dim=self.dim)\n\n\tdef forward(self,x):\n\t\tz, x2 = self.split_input(x)\n\t\tif self.cond: ldj = self.decoder.log_prob(x2, context=z)\n\t\telse: ldj = self.decoder.log_prob(x2)\n\t\treturn z, ldj\n\n\tdef inverse(self,z):\n\t\tif self.cond: x2 = self.decoder.sample(context=z)\n\t\telse: x2 = self.decoder.sample(num_samples=z.shape[0])\n\t\tx = torch.cat([z,x2],dim=self.dim)\n\t\treturn x\n\nclass Squeeze2d(Bijection):\n\n\tdef __init__(self,factor=2, ordered=False):\n\t\tsuper(Squeeze2d,self).__init__()\n\t\tassert isinstance(factor, int)\n\t\tassert factor >1\n\t\tself.factor = factor\n\t\tself.ordered = ordered\n\n\tdef _squeeze(self,x):\n\t\tassert len(x.shape) == 4, 'Dimension should be 4, but was {}'.format(len(x.shape))\n\t\tbatch_size,c,h,w = x.shape\n\t\tassert h % self.factor == 0, 'h = {} not a multiple of {}'.format(h, self.factor)\n\t\tassert w % self.factor == 0, 'w = {} not a multiple of {}'.format(w, self.factor)\n\t\tt = x.view(batch_size, c, h//self.factor, self.factor, w//self.factor, self.factor)\n\t\tif not self.ordered:\n\t\t\tt = t.permute(0,1,3,5,2,4).contiguous()\n\t\telse:\n\t\t\tt = t.permute(0,3,5,1,2,4).contiguous()\n\n\t\tz = t.view(batch_size, c*self.factor**2, h//self.factor, w//self.factor)\n\t\treturn z\n\n\n\tdef _unsqueeze(self, z):\n\t\tassert len(z.shape) == 4, 'Dimension should be 4, but was {}'.format(len(z.shape))\n\t\tbatch_size,c,h,w = z.shape\n\t\tassert c % (self.factor ** 2) == 0, 'c = {} not a multiple of {}'.format(c, self.factor ** 2)\n\t\tif not self.ordered:\n\t\t\tt = z.view(batch_size,c//self.factor**2, self.factor, self.factor,h,w)\n\t\t\tt = t.permute(0,1,4,2,5,3).contiguous()\n\t\telse:\n\t\t\tt = z.view(batch_size,self.factor, self.factor, c//self.factor**2, h,w)\n\t\t\tt = t.permute(0,3,4,1,5,2).contiguous()\n\n\t\tx = t.view(batch_size, c//self.factor**2, h*self.factor,w*self.factor)\n\t\treturn x\n\n\tdef forward(self,x):\n\t\tz = self._squeeze(x)\n\t\tldj = torch.zeros(x.shape[0],device=x.device,dtype=x.dtype)\n\t\treturn z,ldj\n\n\tdef inverse(self,z):\n\t\tx = self._unsqueeze(z)\n\t\treturn x\n\nclass Unsqueeze2d(Squeeze2d):\n\n\tdef __init__(self, factor=2, ordered=False):\n\t\tsuper(Unsqueeze2d, self).__init__(factor=factor, ordered=ordered)\n\n\tdef forward(self,x):\n\t\tz = self._unsqueeze(x)\n\t\tldj = torch.zeros(x.shape[0], device=x.device, dtype=x.dtype)\n\t\treturn z, ldj \n\n\tdef inverse(self,z):\n\t\tx = self._squeeze(z)\n\t\treturn x\n\n\n\n########################################## 4 flow model\n# \tbuild based on nn.Module\n# \tneed base_dist and transforms to initialize\n#\tcan encode image, compute loglikelihood and do sampling\n########################################## \n\nclass Flow(Distribution):\n\n\tdef __init__(self, base_dist, transforms):\n\t\tsuper(Flow,self).__init__()\n\t\tassert isinstance(base_dist, Distribution)\n\t\tif isinstance(transforms,Transform): transforms = [transforms]\n\t\tassert isinstance(transforms, Iterable)\n\t\tassert all(isinstance(transform, Transform) for transform in transforms)\n\t\tself.base_dist = base_dist\n\t\tself.transforms = nn.ModuleList(transforms)\n\t\tself.lower_bound = any(transform.lower_bound for transform in transforms)\n\n\tdef log_prob(self,x):\n\t\tlog_prob = torch.zeros(x.shape[0],device=x.device)\n\t\tfor transform in self.transforms:\n\t\t\tx,ldj = transform(x)\n\t\t\tlog_prob += ldj\n\n\t\tlog_prob += self.base_dist.log_prob(x)\n\t\treturn log_prob\n\n\n\tdef sample(self, num_samples):\n\t\tz = self.base_dist.sample(num_samples)\n\t\tfor transform in reversed(self.transforms):\n\t\t\tz = transform.inverse(z)\n\n\t\treturn z\n\n\tdef sample_with_log_prob(self, num_samples):\n\t\traise RuntimeError(\"Flow does not support sample_with_log_prob, see InverseFlow instead.\")
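\n\n# Sketch: a tiny flow assembled from pieces in this file (illustrative shapes):\n# base = StandardNormal([4])\n# flow = Flow(base_dist=base, transforms=[ActNormBijection(4), Reverse(4)])\n# lp = flow.log_prob(torch.randn(8, 4)) # [8] log-likelihoods\n# xs = flow.sample(16) # 16 samples via inverse transforms\n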
\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":78158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"547963568","text":"import os\nimport stat\nimport shutil\n\n\n# filePath: folder path\n# def delete_file(filePath):\n# if os.path.exists(filePath):\n# for fileList in os.walk(filePath):\n# for name in fileList[2]:\n# os.chmod(os.path.join(fileList[0], name), stat.S_IWRITE)\n# os.remove(os.path.join(fileList[0], name))\n# shutil.rmtree(filePath)\n# return \"delete ok\"\n# else:\n# return \"no filepath\"\n#\n#\n# print\n# os.path.exists(\"G:\\\\pythonwork\\\\server\\\\locomotivelist\\\\aaa\")\n# print\n# delete_file(\"G:\\\\pythonwork\\\\server\\\\locomotivelist\\\\aaa\")\n# from datetime import datetime,timedelta\n# time1 = datetime.now()\n# time2=time1.timestamp()\n# print(time2)\n# str=\"2019-8-8 10:10:10\"\n# cday = datetime.strptime(str, '%Y-%m-%d %H:%M:%S')\n# print(cday)\n# time3=cday.timestamp()\n# print(time3)\n# time4=time2-time3\n# print(time4)\n# import os\n# def delDir(dir):\n# # check whether the given dir exists\n# if not os.path.exists(dir):\n# print(\"directory does not exist\")\n# return\n# if os.path.exists(dir):\n# # check whether dir is a file or a folder\n# if os.path.isfile(dir):\n# # it is a file\n# print(\"file\",dir)\n# os.remove(dir)\n# return\n# # it is a folder\n# # check whether the folder is empty\n# dirlist=os.listdir(dir)\n# if len(dirlist)==0:\n# print(\"folder is empty\",dir)\n# # remove the empty folder\n# os.rmdir(dir)\n# return\n# # the folder is not empty\n# print(\"folder is not empty\")\n# for item in dirlist:\n# # check whether item is a file or a folder\n# itemPath = os.path.join(dir, item)\n# # itemPath = item\n# if os.path.isfile(itemPath):\n# # it is a file; look it up relative to the current directory\n# # resolve the absolute path of item\n# print(\"contains file\",itemPath)\n# os.remove(itemPath)\n# else:\n# # found a folder\n# print(\"contains folder\", itemPath)\n# delDir(itemPath)\n# else:\n# os.rmdir(dir)\n# path=os.path.join(os.getcwd(),\"aaa\")\n# print(path)\n# delDir(path)\n# import xml.sax\n#\n#\n# class MovieHandler(xml.sax.ContentHandler):\n# def __init__(self):\n# self.CurrentData = \"\"\n# self.type = \"\"\n# self.format = \"\"\n# self.year = \"\"\n# self.rating = \"\"\n# self.stars = \"\"\n# self.description = \"\"\n#\n# # element start event handler\n# def startElement(self, tag, attributes):\n# self.CurrentData = tag\n# if tag == \"movie\":\n# print\n# \"*****Movie*****\"\n# title = attributes[\"title\"]\n# print\n# \"Title:\", title\n#\n# # element end event handler\n# def endElement(self, tag):\n# if self.CurrentData == \"type\":\n# print\n# \"Type:\", self.type\n# elif self.CurrentData == \"format\":\n# print\n# \"Format:\", self.format\n# elif self.CurrentData == \"year\":\n# print\n# \"Year:\", self.year\n# elif self.CurrentData == \"rating\":\n# print\n# \"Rating:\", self.rating\n# elif self.CurrentData == \"stars\":\n# print\n# \"Stars:\", self.stars\n# elif self.CurrentData == \"description\":\n# print\n# \"Description:\", self.description\n# self.CurrentData = \"\"\n#\n# # character data event handler\n# def characters(self, content):\n# if self.CurrentData == \"type\":\n# self.type = content\n# elif self.CurrentData == \"format\":\n# self.format = content\n# elif self.CurrentData == \"year\":\n# self.year = 
content\n#         elif self.CurrentData == \"rating\":\n#             self.rating = content\n#         elif self.CurrentData == \"stars\":\n#             self.stars = content\n#         elif self.CurrentData == \"description\":\n#             self.description = content\n#\n#\n# if (__name__ == \"__main__\"):\n#     # create an XMLReader\n#     parser = xml.sax.make_parser()\n#     # turn off namespaces\n#     parser.setFeature(xml.sax.handler.feature_namespaces, 0)\n#\n#     # register our ContentHandler, then parse\n#     Handler = MovieHandler()\n#     parser.setContentHandler(Handler)\n#     parser.parse(\"sss.xml\")\n\n# from datetime import datetime\n# dt = datetime(2015, 4, 19, 12, 10, 10)  # create a datetime from the given date and time\n# dt.timestamp()  # convert the datetime to a timestamp\n# ts=dt.timestamp()  # convert the datetime to a timestamp\n# print(ts)\n#\n# time1 = datetime.now()\n# time2=time1.timestamp()\n# print(time2)\n# print(time1)\nfrom xml.dom.minidom import parse\n\n# the minidom parser opens the XML document and parses it into an in-memory tree\nDOMTree = parse(r'avw')\n# get the XML document object, i.e. the root of the tree\ncollection = DOMTree.documentElement\n\nif collection.hasAttribute('shelf'):\n    # if the root node collection has a shelf attribute, fetch and print its value\n    print('Root element is ', collection.getAttribute('shelf'))\n\n# get all movie nodes\nmovies = collection.getElementsByTagName('movie')\n\n# iterate over the collection and print each movie's details\nfor movie in movies:\n    print(\"*******************movie*******************\")\n    my_list = []\n    if movie.hasAttribute('title'):\n        print('title is ', movie.getAttribute('title'))\n\n    for node in movie.childNodes:\n        my_list.append(node.nodeName)\n    type = movie.getElementsByTagName('type')[0]\n    print('type is ', type.childNodes[0].data)\n    format = movie.getElementsByTagName('format')[0]\n    print('format is ', format.childNodes[0].data)\n\n    if 'year' in my_list:\n        year = movie.getElementsByTagName('year')[0]\n        print('year is ', year.childNodes[0].data)\n\n    rating = movie.getElementsByTagName('rating')[0]\n    print('rating is ', rating.firstChild.data)\n\n    stars = movie.getElementsByTagName('stars')[0]\n    print('stars is ', stars.childNodes[0].data)\n\n    description = movie.getElementsByTagName('description')[0]\n    print('description is ', description.childNodes[0].data)\n","sub_path":"locomotivelist/ast7.py","file_name":"ast7.py","file_ext":"py","file_size_in_byte":6243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"82831556","text":"from random import randint\nimport sys\n\n# setting some parameters\nnumber_of_houses = 8\n\n# size of the map\nx_max = 80\ny_max = 80\n\ndef calculate_shortest_distance(a1, a2, b1, b2):\n    \"\"\"Checks shortest distance between the different points of the houses\"\"\"\n    # print(min(abs(a1 - b1), abs(a1 - b2), abs(a2 - b1), abs(a2 - b2)))\n    return min(abs(a1 - b1), abs(a1 - b2), abs(a2 - b1), abs(a2 - b2))\n\ndef check_overlap(a1, a2, b1, b2):\n    \"\"\"Checks if there is overlap between 4 points on the same axis\"\"\"\n    if (b1 >= a1 and b1 <= a2) or (b2 >= a1 and b1 <= a2):\n        return True\n    return False\n\ndef pythagoras(a, b):\n    \"\"\"Returns the hypotenuse of legs a and b, truncated to an int\"\"\"\n    c_square = float(a) ** 2 + float(b) ** 2\n    return int((c_square) ** (0.5))\n\ndef check_shortest_distance(huis1, huis2):\n    \"\"\"Checks if there is overlap on two axes, or one axis, or no overlap at all and returns shortest distance\"\"\"\n    # if there is overlap on x and y axis, houses overlap\n    if check_overlap(huis1.x1, huis1.x2, huis2.x1, huis2.x2) and \\\n    check_overlap(huis1.y1, huis1.y2, huis2.y1, huis2.y2) == True:\n        return False\n\n    # if there is overlap on x axis, shortest distance between two houses is on y axis\n    elif check_overlap(huis1.x1, huis1.x2, huis2.x1, huis2.x2) == True:\n        return 
calculate_shortest_distance(huis1.y1, huis1.y2, huis2.y1, huis2.y2)\n\n # if there is overlap on y axis, shortest distance between two houses is on x axis\n elif check_overlap(huis1.y1, huis1.y2, huis2.y1, huis2.y2) == True:\n return calculate_shortest_distance(huis1.x1, huis1.x2, huis2.x1, huis2.x2)\n\n # if there is no overlap, pythagoras\n else:\n return pythagoras(calculate_shortest_distance(huis1.x1, huis1.x2, huis2.x1, huis2.x2), calculate_shortest_distance(huis1.y1, huis1.y2, huis2.y1, huis2.y2))\n\n\n\n\n\nclass House:\n \"\"\"Initializes the three kinds of houses\"\"\"\n def __init__(self, kind, id, x=None, y=None):\n # each house gets a unique id and coordinates of left upper corner\n self.id = id\n self.coordinates = (x, y)\n self.nearest_house = None\n self.shortest_distance = None\n\n # Eensgezinswoning\n if kind == \"E\":\n self.naam = 'E'\n self.length = 16 # 8 meter\n self.width = 16 # 8 meter\n self.price = 285000 # euro\n self.verplichte_vrijstand = 4 # 2 meter\n\n self.x1 = x\n self.x2 = x + self.length\n self.y1 = y\n self.y2 = y + self.width\n\n\ndef save_distances(huis, woningen, i):\n\n\n distances = []\n\n\n for woning in woningen:\n\n getal = check_shortest_distance(huis, woning)\n\n if getal == False:\n return False\n distances.append(getal)\n minimum = min(distances)\n min_distances = []\n counter = 0\n for distance in distances:\n if distance == minimum:\n min_distances.append({\"house_number\": i, \"distance\": distance, \"neighbour\": counter})\n counter += 1\n\n return min_distances\n\n\ndef maak_woningen(number_of_houses):\n \"\"\"Makes list of houses, after making each house, calls for checking nearest house\"\"\"\n\n neighbour_list = []\n woningen = []\n\n # makes the appropriate amount of each house\n for i in range(int(number_of_houses)):\n print(\"i = \", i)\n huis = House(\"E\", i, randint(4, (x_max - 4)), randint(4, (y_max - 4)))\n if i == 0:\n huis.nearest_house = 1\n\n else:\n min_distances = save_distances(huis, woningen, i)\n\n while min_distances == False or min_distances[0][\"distance\"] <= huis.verplichte_vrijstand:\n print(\"oeps\")\n\n huis = House(\"E\", i, randint(4, (x_max - 4)), randint(4, (y_max - 4)))\n min_distances = save_distances(huis, woningen, i)\n\n\n huis.shortest_distance = min_distances[0][\"distance\"]\n nearest_neighbours = []\n for dict in min_distances:\n nearest_neighbours.append(dict[\"neighbour\"])\n neighbour_list[dict[\"neighbour\"]].append(i)\n huis.nearest_house = nearest_neighbours\n\n woningen.append(huis)\n print(neighbour_list)\n\n\n\n return woningen\n\n\nwoningen = maak_woningen(number_of_houses)\n\n\n\n\n# def check_nearest_house(huis, woningen, i):\n#\n# if check_overlap_2d(woningen, i) == False:\n# return False\n# if check_overlap_2d(woningen, i) == \"x overlap\":\n# woningen[i].shortest_distance = check_shortest_distance(woningen[i].y1, woningen[i].y2, woningen[i - 1].y1, woningen[i - 1].y2)\n# print(\"shortest distance\")\n# print(woningen[i].shortest_distance)\n# if check_overlap_2d(woningen, i) == \"y overlap\":\n# woningen[i].shortest_distance = check_shortest_distance(woningen[i].x1, woningen[i].x2, woningen[i - 1].x1, woningen[i - 1].x2)\n# print(\"shortest distance\")\n# print(woningen[i].shortest_distance)\n# if check_overlap_2d == \"geen overlap\":\n# woningen[i].shortest_distance = ((check_shortest_distance(woningen[i].y1, woningen[i].y2, woningen[i - 1].y1, woningen[i - 1].y2))^2 + (check_shortest_distance(woningen[i].x1, woningen[i].x2, woningen[i - 1].x1, woningen[i - 1].x2))^2)^(0.5)\n# 
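# [Editor's sketch] The branches above reduce to the standard axis-aligned
# rectangle distance: overlap on one axis leaves a 1-D gap on the other; no
# overlap on either axis means Pythagoras on the two gaps. A minimal
# self-contained illustration (names are illustrative, not from this script;
# note that Python exponentiation is **, while ^ -- as in the commented-out
# draft below -- is bitwise XOR):
def rect_gap(lo1, hi1, lo2, hi2):
    # 1-D gap between intervals [lo1, hi1] and [lo2, hi2]; 0 when they overlap
    return max(0, max(lo1, lo2) - min(hi1, hi2))

def rect_distance(a, b):
    # a and b are (x1, y1, x2, y2) corners with x1 <= x2 and y1 <= y2
    dx = rect_gap(a[0], a[2], b[0], b[2])
    dy = rect_gap(a[1], a[3], b[1], b[3])
    return (dx * dx + dy * dy) ** 0.5

assert rect_distance((0, 0, 2, 2), (3, 3, 4, 4)) == 2 ** 0.5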
print(\"shortest distance\")\n# print(woningen[i].shortest_distance)\n# else:\n# print(\"error\")\n#\n\n\n\n#\n# # to add: check for index out of range\n# def check_for_free_spot(index, woningen, grid):\n# count = index\n# for j in range(woningen.verplichte_vrijstand):\n# count = index\n# if count > GROUND_WIDTH*GROUND_LENGTH:\n# return False\n# else:\n# for k in range(woningen.total_width):\n# if grid[count].bezetting == \"V\" or grid[count].bezetting == \" \" and count%GROUND_LENGTH != 0 and count <= (GROUND_WIDTH*GROUND_LENGTH):\n# count += 1\n# else:\n# return False\n# index += GROUND_LENGTH\n# for j in range(woningen.length):\n# count = index\n# for k in range(woningen.verplichte_vrijstand):\n# if count%GROUND_LENGTH == 0:\n# return False\n# if count > (GROUND_WIDTH*GROUND_LENGTH):\n# return False\n# if grid[count].bezetting == \"V\" or grid[count].bezetting == \" \":\n# count += 1\n# else:\n# return False\n# for i in range(woningen.width):\n# if count%GROUND_LENGTH == 0:\n# return False\n# if count > (GROUND_WIDTH*GROUND_LENGTH):\n# return False\n# if grid[count].bezetting == \" \":\n# count += 1\n# else:\n# return False\n# for k in range(woningen.verplichte_vrijstand):\n# if count%GROUND_LENGTH == 0:\n# return False\n# if count > (GROUND_WIDTH*GROUND_LENGTH):\n# return False\n# if grid[count].bezetting == \"V\" or grid[count].bezetting == \" \":\n# count += 1\n# else:\n# return False\n# index += GROUND_LENGTH\n# for j in range(woningen.verplichte_vrijstand):\n# count = index\n# if count > GROUND_WIDTH*GROUND_LENGTH:\n# return False\n# else:\n# for k in range(woningen.total_width):\n# if grid[count].bezetting == \"V\" or grid[count].bezetting == \" \" and count%GROUND_LENGTH != 0 and count <= (GROUND_WIDTH*GROUND_LENGTH):\n# count += 1\n# else:\n# return False\n# index += GROUND_LENGTH\n# return True\n#\n# def check_water(index, grid):\n# for j in range(8):\n# count = index\n# if count > GROUND_WIDTH*GROUND_LENGTH:\n# return False\n# for i in range(4):\n# if count > GROUND_WIDTH*GROUND_LENGTH:\n# return False\n# if grid[count].bezetting == \" \" and count%GROUND_LENGTH != 0 and count <= (GROUND_WIDTH*GROUND_LENGTH):\n# count += 1\n# else:\n# return False\n# index += GROUND_LENGTH\n# return True\n#\n# def random_algorithm(woningen, grid, WATER_SURF):\n# while all_houses_in_grid(woningen) == False:\n# for i in range(len(woningen)):\n# if woningen[i].ingedeeld == False:\n# index = randint(0, 115119)\n# if check_for_free_spot(index, woningen[i], grid) == True:\n# for j in range(woningen[i].verplichte_vrijstand):\n# count = index\n# for k in range(woningen[i].total_width):\n# grid[count].bezetting = \"V\"\n# count += 1\n# index += GROUND_LENGTH\n# for j in range(woningen[i].length):\n# count = index\n# for k in range(woningen[i].verplichte_vrijstand):\n# grid[count].bezetting = \"V\"\n# count += 1\n# for k in range(woningen[i].width):\n# grid[count].bezetting = woningen[i].naam\n# grid[count].id = woningen[i].id\n# count += 1\n# for k in range(woningen[i].verplichte_vrijstand):\n# grid[count].bezetting = \"V\"\n# count += 1\n# index += GROUND_LENGTH\n# for j in range(woningen[i].verplichte_vrijstand):\n# count = index\n# for k in range(woningen[i].total_width):\n# grid[count].bezetting = \"V\"\n# count += 1\n# index += GROUND_LENGTH\n# print(index)\n# woningen[i].ingedeeld = True\n# print(\"Woning %i toegevoegd.\", i)\n# while WATER_SURF > 0:\n# index = randint(0, 115119)\n# if check_water(index, grid) == True:\n# for j in range(8):\n# count = index\n# for i in range(4):\n# 
grid[count].bezetting = \"W\"\n# count += 1\n# WATER_SURF -= 1\n# index += GROUND_LENGTH\n# return woningen, grid\n#\n#\n# GROUND_LENGTH = 360 # 180 meter\n# GROUND_WIDTH = 320 # 160 meter\n# WATER_SURF = int(0.2 * GROUND_WIDTH * GROUND_LENGTH) # square meter;\n# # hoogte-breedteverhouding\n# # tussen 1 en de 4\n# model = 60 # 20, 40 of 60 woningen\n#\n# aantal_eensgezin = 0.6 * model\n# aantal_bungalow = 0.25 * model\n# aantal_maison = 0.15 * model\n#\n# woningen = maak_woningen(aantal_eensgezin, aantal_bungalow, aantal_maison)\n#\n# grid = create_grid(GROUND_LENGTH, GROUND_WIDTH)\n#\n# #x_axis, y_axis = determine_axes(3, 2, 5)\n#\n# woningen, grid = random_algorithm(woningen, grid, WATER_SURF)\n#\n# def add_vrijstand_en_water(grid):\n# for i in range(len(grid)):\n# if grid[i].bezetting == None:\n# if WATER_SURF > 0:\n# grid[i].bezetting = 'W'\n# WATER_SURF -= 1\n# else:\n# grid[i].bezetting = ' '\n#\n# the_grid = print_grid(GROUND_LENGTH, GROUND_WIDTH, grid)\n#\n# print(the_grid)\n#\n#\n# id_array= []\n# l = []\n# for i in range(len(grid)):\n# if (grid[i].bezetting == \"E\" or grid[i].bezetting == \"B\" or grid[i].bezetting == \"M\") and (grid[i].id not in id_array):\n# id_array.append(grid[i].id)\n# bla = grid[i].id\n# count = 0\n# while grid[i - 1].bezetting != \"E\" or grid[i - 1].bezetting != \"B\" or grid[i - 1].bezetting != \"M\":\n# if i == 0:\n# break\n# i -= 1\n# count += 1\n# l.append([bla, count])\n# print(l)\n# print(len(l))\n#\n#\n#\n#\n#\n#\n# text_file = open(\"Output.txt\", \"w\")\n# text_file.write(the_grid)\n# text_file.close()\n","sub_path":"oud/minitest.py","file_name":"minitest.py","file_ext":"py","file_size_in_byte":12060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168544837","text":"# Copyright 2016, 2017 California Institute of Technology\n# Users must agree to abide by the restrictions listed in the\n# file \"LegalStuff.txt\" in the PROPER library directory.\n#\n# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology\n# Original IDL version by John Krist\n# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri\n\n\n\nimport proper\nimport numpy as np\nfrom numpy.fft import fft2, ifft2\n\n\ndef prop_stw(wf, dz = 0.0):\n \"\"\"Propagate from a spherical reference surface that is outside the Rayleigh \n limit from focus to a planar one that is inside. Used by propagate function.\n \n Parameters\n ----------\n wf : obj\n WaveFront class object\n \n dz : float\n Distance in meters to propagate\n \n Returns\n -------\n None \n Modifies the wavefront.\n \"\"\"\n ngrid = wf.ngrid\n \n if proper.verbose:\n print(\" STW: dz = %3.6f\" %(dz))\n \n if wf.reference_surface != \"SPHERI\":\n print(\" STW: Input reference surface not spherical. 
Using PTP\")\n proper.prop_ptp(wf, dz)\n return\n \n if dz == 0.0:\n dz = wf.z_w0 - wf.z\n \n wf.z = wf.z + dz\n wf.dx = wf.lamda * np.abs(dz) / (ngrid * wf.dx)\n \n direct = dz >= 0.0\n \n if direct: # forward transform\n if proper.use_fftw:\n if proper.verbose:\n print(\"using fftw for prop_stw FFTW_FORWARD\")\n x = proper.prop_fftw(wf.wfarr,directionFFTW = 'FFTW_FORWARD') / np.size(wf.wfarr)\n wf.wfarr = x\n else:\n wf.wfarr = fft2(wf.wfarr) / np.size(wf.wfarr)\n\n if proper.verbose:\n print(\" FFT2 prop_stw \")\n \n wf.wfarr *= ngrid\n else:\n if proper.use_fftw:\n if proper.verbose:\n print(\"using fftw for prop_stw FFTW_BACKWARD\")\n xi = proper.prop_fftw(wf.wfarr, directionFFTW = 'FFTW_BACKWARD') * np.size(wf.wfarr)\n wf.wfarr = xi\n else:\n wf.wfarr = ifft2(wf.wfarr) * np.size(wf.wfarr)\n wf.wfarr /= ngrid\n if proper.verbose:\n print(\"IFFT2 prop_stw \")\n proper.prop_qphase(wf, dz)\n \n if proper.phase_offset:\n wf.wfarr = wf.wfarr * np.exp(complex(0.,1.) * 2*np.pi*dz/wf.lamda)\n \n if proper.verbose:\n print(\" STW: z = %4.6f dx = %.6e\" %(wf.z, wf.dx))\n \n wf.reference_surface = \"PLANAR\"\n \n return\n","sub_path":"Proper/build/lib.macosx-10.7-x86_64-3.6/proper/prop_stw.py","file_name":"prop_stw.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"570577507","text":"# pipeline_manager\n\nimport os\nimport shutil\nimport json\n\nfrom scipy import interp\nimport matplotlib.pyplot as plt\n\nfrom attrdict import AttrDict\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import gmean\nfrom sklearn.metrics import roc_auc_score, auc, plot_roc_curve\nfrom sklearn.model_selection import train_test_split, GridSearchCV, KFold\n\nfrom mlxtend.feature_selection import SequentialFeatureSelector\nfrom mlxtend.plotting import plot_sequential_feature_selection\n\nimport joblib\n\nimport gc\ngc.enable()\n\nfrom . import pipeline_config as config\n\nfrom . 
import pipeline_blocks as blocks\nfrom .pipelines import PIPELINES \nfrom ..common.utils import init_logger, read_params, set_seed, param_eval, create_submission, add_prefix_keys\n\nfrom ..data_preprocessing.data_cleaning import KalapaCleaning\nfrom ..data_preprocessing.feature_extraction import KalapaFeatureExtraction\n \nset_seed(config.RANDOM_SEED)\nlogger = init_logger()\n\nclass PipelineManager:\n def preprocessing(self, tag,\n train_filepath=config.params.train_filepath,\n test_filepath=config.params.test_filepath,\n train_preprocessed_filepath=config.params.train_preprocessed_filepath,\n test_preprocessed_filepath=config.params.test_preprocessed_filepath):\n preprocessing(False, tag,\n train_filepath,\n test_filepath,\n train_preprocessed_filepath,\n test_preprocessed_filepath)\n\n def preprocessing_cv(self, data_dev_mode, tag, \n X_train_filepaths=config.params.cv_X_train_preprocessed_filepaths,\n y_train_filepaths=config.params.cv_y_train_filepaths,\n X_dev_filepaths=config.params.cv_X_dev_preprocessed_filepaths,\n y_dev_filepaths=config.params.cv_y_dev_filepaths):\n preprocessing_cv(data_dev_mode, tag,\n X_train_filepaths,\n y_train_filepaths,\n X_dev_filepaths,\n y_dev_filepaths)\n\n def train(self, pipeline_name, data_dev_mode, tag, train_filepath=config.params.train_preprocessed_filepath, test_filepath=config.params.test_preprocessed_filepath):\n self.pipe = train(pipeline_name, data_dev_mode, tag, train_filepath, test_filepath)\n \n def train_cv(self, pipeline_name, data_dev_mode, tag):\n train_cv(pipeline_name, data_dev_mode, tag)\n \n def predict(self, pipeline_name, tag, is_submit, train_filepath=config.params.train_preprocessed_filepath, test_filepath=config.params.test_preprocessed_filepath):\n predict_and_submit(pipeline_name, tag, self.pipe, train_filepath, test_filepath, is_submit=is_submit)\n\n def tuning(self, pipeline_name, tag, train_filepath=config.params.train_preprocessed_filepath, test_filepath=config.params.test_preprocessed_filepath):\n hyperparameter_tunning(pipeline_name, False, tag, train_filepath, test_filepath)\n \n def algos_test(self, data_dev_mode, tag):\n multiple_algos_test(data_dev_mode, tag)\n def kalapa_preprocessing(self, data_dev_mode, \n X_train_filepaths=config.params.cv_X_train_filepaths,\n y_train_filepaths=config.params.cv_y_train_filepaths,\n X_dev_filepaths=config.params.cv_X_dev_filepaths,\n y_dev_filepaths=config.params.cv_y_dev_filepaths):\n return kalapa_kfold_preprocessing(\n data_dev_mode, \n X_train_filepaths,\n y_train_filepaths,\n X_dev_filepaths,\n y_dev_filepaths)\n \n\ndef preprocessing(data_dev_mode, tag, train_filepath, test_filepath, train_preprocessed_filepath, test_preprocessed_filepath):\n logger.info('PREPROCESSING...')\n logger.info(f'PREPROCESSING, train filepath: {train_filepath}')\n logger.info(f'PREPROCESSING, test filepath: {test_filepath}')\n \n data = _read_data(data_dev_mode, train_filepath, test_filepath)\n train_set = data['train'].copy()\n test_set = data['test'].copy()\n\n logger.info(f'PREPROCESSING, Train shape: {train_set.shape}')\n y = train_set[config.TARGET_COL].values.reshape(-1,)\n train_set = train_set.drop(columns=config.TARGET_COL)\n\n # logger.info('PREPROCESSING, Feature extraction...')\n pca_extract = blocks.pca_block(tag)\n train_new_features = pd.DataFrame(pca_extract.transformer.fit_transform(train_set))\n test_new_features = pd.DataFrame(pca_extract.transformer.fit_transform(test_set))\n # train_set = pd.concat([train_set, train_new_features], axis=1)\n # test_set = pd.concat([test_set, 
test_new_features], axis=1)\n\n # logger.info('PREPROCESSING, Oversampling...')\n temp_train_set = train_set\n temp_y = y\n over_sampling = blocks.over_sample_block(config.SOLUTION_CONFIG, tag)\n # train_set, y = over_sampling.transformer.fit_transform(train_set, y)\n\n logger.info('PREPROCESSING, Feature selection...')\n selection = blocks.selection_block(config.SOLUTION_CONFIG, tag)\n selection.transformer.fit(train_set, y, test_set)\n cols_selected = selection.transformer.transform(train_set)\n logger.info(f'PREPROCESSING, Feature selection, number of features: {len(cols_selected)}')\n \n train_set = train_set[cols_selected]\n train_set[config.TARGET_COL[0]] = y\n test_set = test_set[cols_selected]\n \n del data, y, pca_extract, train_new_features, test_new_features, selection, temp_train_set, temp_y\n gc.collect()\n\n logger.info('')\n train_set.to_csv(train_preprocessed_filepath, index=False)\n test_set.to_csv(test_preprocessed_filepath, index=False)\n\n logger.info(f'PREPROCESSING, Train set is dumped into path: {train_preprocessed_filepath}')\n logger.info(f'PREPROCESSING, Test set is dumped into path: {test_preprocessed_filepath}')\n logger.info('DONE PREPROCESSING...')\n return train_set, test_set\n\ndef train(pipeline_name, data_dev_mode, tag, train_filepath, test_filepath):\n logger.info('TRAINING...')\n \n if bool(config.params.clean_experiment_directory_before_training) and os.path.isdir(config.params.experiment_dir):\n logger.info('Cleaning experiment directory...')\n shutil.rmtree(config.params.experiment_dir)\n\n data = _read_data(data_dev_mode, train_filepath, test_filepath) \n\n train_set = data['train']\n \n y = train_set[config.TARGET_COL].values.reshape(-1,)\n train_set = train_set.drop(columns=config.TARGET_COL)\n \n pipeline = PIPELINES[pipeline_name](so_config = config.SOLUTION_CONFIG, suffix=tag)\n\n logger.info('TRAINING, Start pipeline fit')\n pipeline.fit(train_set, y)\n\n logger.info('DONE TRAINING...')\n del data, train_set, y\n gc.collect()\n return pipeline \n\ndef preprocessing_cv(data_dev_mode, tag, cv_X_train_filepaths, cv_y_train_filepaths, cv_X_dev_filepaths, cv_y_dev_filepaths):\n logger.info('PREPROCESSING CV...')\n \n if bool(config.params.clean_experiment_directory_before_training) and os.path.isdir(config.params.experiment_dir):\n logger.info('Cleaning experiment directory...')\n shutil.rmtree(config.params.experiment_dir)\n kfold = _read_kfold_data(data_dev_mode,\n cv_X_train_filepaths,\n cv_y_train_filepaths,\n cv_X_dev_filepaths,\n cv_y_dev_filepaths)\n \n for i in range(0, len(kfold)):\n logger.info(f'PREPROCESSING CV, Fold {i}, Train shape: {kfold[i][\"X_train\"].shape}')\n logger.info(f'PREPROCESSING CV, Fold {i}, y train shape: {kfold[i][\"y_train\"].shape}')\n logger.info(f'PREPROCESSING CV, Fold {i}, Dev shape: {kfold[i][\"X_dev\"].shape}')\n logger.info(f'PREPROCESSING CV, Fold {i}, y dev shape: {kfold[i][\"y_dev\"].shape}')\n \n # logger.info(f'PREPROCESSING CV, Fold {i}, Feature extraction...')\n pca_extract = blocks.pca_block(tag)\n train_new_features = pd.DataFrame(pca_extract.transformer.fit_transform(kfold[i][\"X_train\"]))\n test_new_features = pd.DataFrame(pca_extract.transformer.fit_transform(kfold[i][\"X_dev\"]))\n # kfold[i][\"X_train\"]= pd.concat([kfold[i][\"X_train\"], train_new_features], axis=1)\n # kfold[i][\"X_dev\"]= pd.concat([kfold[i][\"X_dev\"], test_new_features], axis=1)\n\n # logger.info(f'PREPROCESSING, Fold {i}, Oversampling...')\n temp_train_set = kfold[i][\"X_train\"]\n temp_y = kfold[i][\"y_train\"]\n 
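# [Editor's sketch] The fold loop here follows the usual leakage-avoidance
# pattern: every stateful step (PCA, oversampling, k-means, selection) is fit
# on the fold's training split only and merely applied to the dev split. A
# hedged, self-contained restatement with plain scikit-learn (the selector
# choice is illustrative, not the project's own selection block):
from sklearn.feature_selection import SelectKBest, f_classif

def fit_fold(X_train, y_train, X_dev, k=20):
    selector = SelectKBest(score_func=f_classif, k=min(k, X_train.shape[1]))
    X_train_sel = selector.fit_transform(X_train, y_train)  # fit on train only
    X_dev_sel = selector.transform(X_dev)                    # apply, never refit
    return X_train_sel, X_dev_sel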
over_sampling = blocks.over_sample_block(config.SOLUTION_CONFIG, tag)\n # kfold[i][\"X_train\"], kfold[i][\"y_train\"] = over_sampling.transformer.fit_transform(kfold[i][\"X_train\"], kfold[i][\"y_train\"])\n\n # logger.info(f'PREPROCESSING, KMeanFeaturizer...')\n kmeans = blocks.kmeans_block(config.SOLUTION_CONFIG, tag)\n train_cluster = kmeans.transformer.fit_transform(kfold[i][\"X_train\"], kfold[i][\"y_train\"])\n dev_cluster = kmeans.transformer.transform(kfold[i][\"X_dev\"])\n # kfold[i][\"X_train\"] = pd.concat([kfold[i][\"X_train\"], pd.DataFrame(train_cluster)], axis=1, ignore_index=True)\n # kfold[i][\"X_train\"]['cluster'] = train_cluster\n # kfold[i][\"X_dev\"] = pd.concat([kfold[i][\"X_dev\"], pd.DataFrame(dev_cluster)], axis=1, ignore_index=True)\n # kfold[i][\"X_dev\"]['cluster'] = dev_cluster\n\n logger.info(f'PREPROCESSING, Fold {i}, Feature selection...')\n selection = blocks.selection_block(config.SOLUTION_CONFIG, tag)\n selection.transformer.fit(kfold[i][\"X_train\"], kfold[i][\"y_train\"], kfold[i][\"X_dev\"])\n cols_selected = selection.transformer.transform(kfold[i][\"X_train\"])\n logger.info(f'PREPROCESSING, Fold {i}, Feature selection, number of features: {len(cols_selected)}')\n kfold[i][\"X_train\"]= kfold[i][\"X_train\"][cols_selected]\n kfold[i][\"X_dev\"]= kfold[i][\"X_dev\"][cols_selected]\n \n del pca_extract, train_new_features, test_new_features, selection, temp_train_set, temp_y\n gc.collect()\n\n logger.info('')\n kfold[i][\"X_train\"].to_csv(config.params.cv_X_train_preprocessed_final_filepaths[i], index=False)\n kfold[i][\"X_dev\"].to_csv(config.params.cv_X_dev_preprocessed_final_filepaths[i], index=False)\n\n logger.info(f'PREPROCESSING CV, Fold {i}, Train set is dumped into path: {config.params.cv_X_train_preprocessed_filepaths[i]}')\n logger.info(f'PREPROCESSING CV, Fold {i}, y train set is dumped into path: {config.params.cv_y_train_preprocessed_filepaths[i]}')\n logger.info(f'PREPROCESSING CV, Fold {i}, Dev set is dumped into path: {config.params.cv_X_dev_preprocessed_filepaths[i]}')\n logger.info(f'DONE PREPROCESSING CV, Fold {i},...')\n \n logger.info('DONE PREPROCESSING CV...')\n\ndef FeatureSelection(pipeline_name, data_dev_mode, tag, train_filepath, test_filepath):\n logger.info('FEATURE SELECTION...')\n \n if bool(config.params.clean_experiment_directory_before_training) and os.path.isdir(config.params.experiment_dir):\n logger.info('Cleaning experiment directory...')\n shutil.rmtree(config.params.experiment_dir)\n \n data = _read_data(data_dev_mode, train_filepath, test_filepath) \n\n train_set = data['train']\n \n y = train_set[config.TARGET_COL].values.reshape(-1,)\n train_set = train_set.drop(columns=config.TARGET_COL)\n \n pipeline = PIPELINES[pipeline_name](so_config = config.SOLUTION_CONFIG, suffix=tag)\n\n sfs = SequentialFeatureSelector(estimator=pipeline, k_features=(10, len(train_set.columns)), forward=False, verbose=2, cv=5, scoring='roc_auc')\n sfs.fit(train_set.to_numpy(), y)\n \n fig = plot_sequential_feature_selection(sfs.get_metric_dict())\n plt.ylim([0.6, 1])\n plt.title('Sequential Feature Selection')\n plt.grid()\n plt.show()\n\ndef multiple_algos_test(data_dev_mode, tag):\n logger.info('TESTING ALGORITHMS...')\n \n kfold = _read_kfold_data(data_dev_mode, \n config.params.cv_X_train_preprocessed_filepaths,\n config.params.cv_y_train_filepaths,\n config.params.cv_X_dev_preprocessed_filepaths,\n config.params.cv_y_dev_filepaths)\n\n # logger.info('TESTING SINGLE MODELS...')\n # \n # for algo in ['LightGBM', 'CatBoost', 
'XGBoost', 'RandomForest', 'NGBoost', 'NeuralNetwork']:\n    #     pipeline = PIPELINES[algo](so_config = config.SOLUTION_CONFIG, suffix=tag)\n    #     _cross_validate_auc(pipeline, kfold, features=None)\n    \n    logger.info('TESTING BLENDING MODES...')\n    base_models=[\n        PIPELINES[algo](so_config = config.SOLUTION_CONFIG, suffix=tag) for algo in ['LightGBM', 'CatBoost', 'XGBoost', 'RandomForest', 'NGBoost']\n    ]\n    meta_model = blocks.LogisticRegression()\n    \n    blending = PIPELINES['Blending'](base_models=base_models, meta_model=meta_model, so_config = config.SOLUTION_CONFIG, suffix=tag)\n    _cross_validate_auc(blending, kfold, features=None)\n\n\ndef train_cv(pipeline_name, data_dev_mode, tag):\n    logger.info('TRAINING CV ...')\n    \n    if bool(config.params.clean_experiment_directory_before_training) and os.path.isdir(config.params.experiment_dir):\n        logger.info('Cleaning experiment directory...')\n        shutil.rmtree(config.params.experiment_dir)\n\n    pipeline = PIPELINES[pipeline_name](so_config = config.SOLUTION_CONFIG, suffix=tag)\n\n    kfold = _read_kfold_data(data_dev_mode,\n                             config.params.cv_X_train_preprocessed_filepaths,\n                             config.params.cv_y_train_preprocessed_filepaths,\n                             config.params.cv_X_dev_filepaths,\n                             config.params.cv_y_dev_filepaths)\n\n    _cross_validate_auc(pipeline, kfold, features=None)\n\ndef predict_and_submit(pipeline_name, suffix, pipeline, train_filepath, test_filepath, is_submit=False):\n    logger.info('PREDICT...')\n    \n    logger.info('PREDICT, Start pipeline transform')\n\n    data = _read_data(False, train_filepath, test_filepath)\n\n    test_set = data['test']\n\n    y_preds = pipeline.transform(test_set).reshape(-1)\n    \n    if is_submit:\n        logger.info('PREDICT, Creating submission...')\n        submission = create_submission(test_set, config.ID_COL[0], config.TARGET_COL[0], y_preds)\n        \n        submission_filepath = os.path.join(config.params.experiment_dir,'submission.csv')\n        submission.to_csv(submission_filepath, index=None, encoding='utf-8')\n        logger.info('PREDICT, Creating submission completed!')\n        logger.info(f'submission.csv is persisted to {submission_filepath}')\n    logger.info('DONE PREDICT') \n\ndef hyperparameter_tunning(pipeline_name, data_dev_mode, tag, train_filepath, test_filepath):\n    logger.info('HYPERPARAMETER TUNING...')\n\n    logger.info('HYPERPARAMETER TUNING, Start pipeline') \n    pipeline = PIPELINES[pipeline_name](so_config = config.SOLUTION_CONFIG, suffix=tag)\n\n    logger.info('HYPERPARAMETER TUNING, Create GridSearchCV...')\n    param_grid = add_prefix_keys(config.SOLUTION_CONFIG.tuner[pipeline_name], f'{pipeline_name}{tag}__')\n    grid = GridSearchCV(estimator=pipeline, \n                        param_grid=param_grid,\n                        verbose=1,\n                        cv=5,\n                        n_jobs=-1)\n    data = _read_data(data_dev_mode, train_filepath, test_filepath) \n\n    train_set = data['train']\n\n    y = train_set[config.TARGET_COL].values.reshape(-1,)\n    train_set = train_set.drop(columns=config.TARGET_COL)\n\n    logger.info('HYPERPARAMETER TUNING, Start GridSearchCV...')\n    grid.fit(train_set, y)\n\n    logger.info('HYPERPARAMETER TUNING, Done GridSearchCV')\n    logger.info(f'HYPERPARAMETER TUNING, Best params: {grid.best_params_}')\n\n    with open(pipeline_name+'.json', 'a+') as out_params_file:\n        json.dump(grid.best_params_, out_params_file) \n\n    del train_set, y\n    gc.collect()\n\n    logger.info('DONE HYPERPARAMETER TUNING...')\n\ndef _read_data(data_dev_mode, train_filepath, test_filepath):\n    logger.info('Reading data...')\n    if data_dev_mode == True:\n        nrows = config.DEV_SAMPLE_SIZE\n        logger.info(f'Running in \"dev-mode\" with sample size of {nrows}')\n    else:\n        nrows = None\n    \n    
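# [Editor's sketch] The dev-mode branch above caps how much of each CSV is
# read via pandas' nrows argument, so a debug run touches only the first
# DEV_SAMPLE_SIZE rows. The same idea in isolation (path and row count are
# hypothetical):
import pandas as pd

def read_capped(path, dev_mode, dev_rows=100):
    # nrows=None reads the whole file; an integer stops after that many rows
    return pd.read_csv(path, nrows=dev_rows if dev_mode else None)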
raw_data = {}\n\n logger.info('Reading train ...')\n train = pd.read_csv(train_filepath, nrows=nrows)\n raw_data['train']=train\n logger.info('Reading test ...')\n test = pd.read_csv(test_filepath, nrows=nrows)\n raw_data['test']=test\n\n del train, test\n gc.collect()\n\n logger.info('Reading done!')\n return raw_data\n\ndef _read_kfold_data(data_dev_mode, cv_X_train_filepaths, cv_y_train_filepaths, cv_X_dev_filepaths, cv_y_dev_filepaths):\n logger.info('Reading kfold data...')\n if data_dev_mode == True:\n nrows = config.DEV_SAMPLE_SIZE\n logger.info(f'Running in \"dev-mode\" with sample size of {nrows}')\n else:\n nrows = None\n \n kfold = []\n\n for i in range(0,len(cv_X_train_filepaths)):\n X_train = pd.read_csv(cv_X_train_filepaths[i], nrows=nrows)\n y_train = pd.read_csv(cv_y_train_filepaths[i], nrows=nrows).values.reshape(-1,)\n X_dev = pd.read_csv(cv_X_dev_filepaths[i], nrows=nrows)\n y_dev = pd.read_csv(cv_y_dev_filepaths[i], nrows=nrows).values.reshape(-1,)\n kfold.append({\n \"X_train\":X_train,\n \"y_train\":y_train,\n \"X_dev\":X_dev,\n \"y_dev\":y_dev\n })\n\n logger.info('Done reading kfold data.')\n return kfold\n\ndef _cross_validate_auc(model, kfold, features=None, **clf_params):\n train_tprs = []\n train_aucs = []\n train_mean_fpr = np.linspace(0, 1, 100)\n dev_tprs = []\n dev_aucs = []\n dev_mean_fpr = np.linspace(0, 1, 100)\n\n fig, ax = plt.subplots()\n fig.set_size_inches((16,10))\n for i in range(0, 3):\n print(\"{}/{}\".format(i+1, len(kfold)))\n kf = kfold[i]\n X_train_kf, y_train_kf = kf[\"X_train\"].copy(), kf[\"y_train\"].copy()\n X_dev_kf, y_dev_kf = kf[\"X_dev\"].copy(), kf[\"y_dev\"].copy()\n if features is not None:\n X_train_kf = X_train_kf[features]\n X_dev_kf = X_dev_kf[features]\n\n model.fit(X_train_kf, y_train_kf, **clf_params)\n # plot train\n train_display = plot_roc_curve(model, X_train_kf, y_train_kf,\n name='Train ROC fold {}'.format(i),\n alpha=0.6, lw=1, ax=ax)\n train_interp_tpr = interp(train_mean_fpr, train_display.fpr, train_display.tpr)\n train_interp_tpr[0] = 0.0\n train_tprs.append(train_interp_tpr)\n train_aucs.append(train_display.roc_auc)\n # plot dev\n dev_display = plot_roc_curve(model, X_dev_kf, y_dev_kf,\n name='Dev ROC fold {}'.format(i),\n alpha=0.6, lw=1, ax=ax)\n dev_interp_tpr = interp(dev_mean_fpr, dev_display.fpr, dev_display.tpr)\n dev_interp_tpr[0] = 0.0\n dev_tprs.append(dev_interp_tpr)\n dev_aucs.append(dev_display.roc_auc)\n \n # plot mean train\n train_mean_tpr = np.mean(train_tprs, axis=0)\n train_mean_tpr[-1] = 1.0\n train_mean_auc = auc(train_mean_fpr, train_mean_tpr)\n train_std_auc = np.std(train_aucs)\n ax.plot(train_mean_fpr, train_mean_tpr, color='r',\n label=r'Train Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (train_mean_auc, train_std_auc),\n lw=2, alpha=1)\n # plot mean dev\n dev_mean_tpr = np.mean(dev_tprs, axis=0)\n dev_mean_tpr[-1] = 1.0\n dev_mean_auc = auc(dev_mean_fpr, dev_mean_tpr)\n dev_std_auc = np.std(dev_aucs)\n ax.plot(dev_mean_fpr, dev_mean_tpr, color='b',\n label=r'Dev Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (dev_mean_auc, dev_std_auc),\n lw=2, alpha=1)\n \n ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],\n title=\"Receiver operating characteristic example\")\n ax.legend(loc=\"center left\", bbox_to_anchor=(1, 0.5))\n plt.show()\n\ndef _get_KFold(X, y, n_splits, random_state=None):\n splits = []\n kf = KFold(n_splits=n_splits, random_state=random_state, shuffle=True)\n for train_index, dev_index in kf.split(X):\n if isinstance(X, pd.DataFrame):\n X_train, X_dev, y_train, y_dev = X.iloc[train_index], 
X.iloc[dev_index], y[train_index], y[dev_index]\n elif isinstance(X, np.ndarray):\n X_train, X_dev, y_train, y_dev = X[train_index], X[dev_index], y[train_index], y[dev_index]\n splits.append({\n \"X_train\": X_train,\n \"X_dev\": X_dev,\n \"y_train\": y_train,\n \"y_dev\": y_dev,\n })\n X_train.reset_index(inplace=True, drop=True)\n y_train.reset_index(inplace=True, drop=True)\n X_dev.reset_index(inplace=True, drop=True)\n y_dev.reset_index(inplace=True, drop=True)\n\n return splits\n\ndef kalapa_kfold_preprocessing(data_dev_mode, cv_X_train_filepaths, cv_y_train_filepaths, cv_X_dev_filepaths, cv_y_dev_filepaths):\n KFolds = _read_kfold_data(data_dev_mode, cv_X_train_filepaths, cv_y_train_filepaths, cv_X_dev_filepaths, cv_y_dev_filepaths)\n \n for i in range(len(KFolds)):\n X_train, X_dev = KFolds[i]['X_train'], KFolds[i]['X_dev']\n y_train = KFolds[i]['y_train']\n \n y_train = pd.DataFrame(y_train, columns=['label'])\n X_train_p = pd.concat([X_train, y_train], axis=1)\n \n clean = KalapaCleaning()\n X_train, X_dev = clean.fit_transform(X_train_p, X_dev)\n extraction = KalapaFeatureExtraction()\n X_train, X_dev = extraction.fit_transform(X_train_p, X_dev)\n \n X_train.drop(columns=['label'], inplace=True)\n KFolds[i]['X_train'] = X_train\n KFolds[i]['X_dev'] = X_dev\n \n \n return KFolds\n ","sub_path":"stacking_solution_1/src/data_mining/.ipynb_checkpoints/pipeline_manager-checkpoint.py","file_name":"pipeline_manager-checkpoint.py","file_ext":"py","file_size_in_byte":21833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"99370206","text":"t = int(input())\nfor _ in range(t):\n n = int(input())\n binary = format(n,'b')\n binary = list(map(int,binary))\n binary.reverse()\n ans = 0\n for i,v in enumerate(binary):\n if v==1:\n ans+=pow(3,i)\n print(ans)","sub_path":"Silver4/터.py","file_name":"터.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"12911289","text":"\"\"\"Pydate URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom django.urls import path, include\n\nfrom Pydate import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.base, ),\n path('register/', views.register),\n path('login/', auth_views.LoginView.as_view(), name=\"login\"),\n path('personality_test/', views.personality_test),\n path('personality_test//', views.test_vote, name='test_vote'),\n path('add_personal_questions/', views.add_personal_questions),\n path('chat/', include('Chat.urls')),\n path('profile/', views.profile),\n path('help/', views.info_view, name='info'),\n path('profile/edit/', views.update_profile),\n path('profile/editimg/', views.update_profile_picture),\n path('/personal_questionnaire/', views.personal_questionnaire, name=\"personal_questionnaire\"),\n path('my_matches/', views.my_matches, name=\"my_matches\"),\n path('view_answers/', views.view_answers, name=\"view_answers\"),\n path('view_people/', views.view_people, name=\"view_people\"),\n path('remind_pass/', views.remind_pass, name=\"remind_pass\"),\n url(r'^view_answers/(?P\\d+)/delete_match$', views.match_delete, name='match_delete'),\n url(r'^view_answers/(?P\\d+)/accept_match$', views.match_accept, name='match_accept'),\n url(r'^view_people/(?P\\d+)/make_crush$', views.yes_crush, name='yes_crush'),\n url(r'^view_people/(?P\\d+)/decline_crush$', views.no_crush, name='no_crush'),\n url(r'^logout/$', views.logout_view, name='logout')\n]\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n# static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + staticfiles_urlpatterns()\n\n# if settings.DEBUG:\n# urlpatterns += static(settings.MEDIA_URL,\n# document_root=settings.MEDIA_ROOT)\n","sub_path":"Pydate/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"653030451","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom travelport.models.type_booking_transactions_allowed_2 import TypeBookingTransactionsAllowed2\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/common_v32_0\"\n\n\n@dataclass\nclass TypeTransactionsAllowed2(TypeBookingTransactionsAllowed2):\n \"\"\"\n Parameters\n ----------\n shopping_enabled\n Allow or prohibit shopping transaction for the given product type on\n this Provider/Supplier. Inheritable.\n pricing_enabled\n Allow or prohibit pricing transaction for the given product type on\n this Provider/Supplier. 
Inheritable.\n \"\"\"\n class Meta:\n name = \"typeTransactionsAllowed\"\n\n shopping_enabled: None | bool = field(\n default=None,\n metadata={\n \"name\": \"ShoppingEnabled\",\n \"type\": \"Attribute\",\n }\n )\n pricing_enabled: None | bool = field(\n default=None,\n metadata={\n \"name\": \"PricingEnabled\",\n \"type\": \"Attribute\",\n }\n )\n","sub_path":"travelport/models/type_transactions_allowed_2.py","file_name":"type_transactions_allowed_2.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"67296281","text":"from PyQt5 import QtWidgets\nfrom PyQt5.QtCore import Qt\nimport numpy as np\nfrom Visualizer import Visualizer\n\n\nclass TrackList:\n def __init__(self, parent):\n self.ch_tracks = dict()\n self.parent = parent\n self.parsed = False\n self.listWidget = self.parent.GetTracksList()\n self.Visualizer = Visualizer.getInstance()\n self.last_id = -1e3\n self.np_tracks = np.zeros(0)\n self.clothes = dict()\n self.tracks_path = \"\"\n self.clothes_path = \"\"\n\n def setInformation(self, tracks, clothes, tracks_path, clothes_path):\n self.clothes_path = clothes_path\n self.clothes = clothes\n self.setTracks(tracks, tracks_path, False)\n\n def setTracks(self, tracks, path, delete=True):\n if delete:\n self.clothes = dict()\n\n self.Visualizer.setTracks([])\n self.tracks_path = path\n self.np_tracks = np.copy(tracks)\n self.copy_np_tracks = np.copy(self.np_tracks)\n tracks_ids = np.unique(self.np_tracks[:, 1])\n self.last_id = tracks_ids[-1]\n self.Visualizer.setTracksInfo(self.np_tracks)\n self.displayTrackList()\n\n def displayTrackList(self):\n tracks_ids = np.unique(self.np_tracks[:, 1])\n for track in tracks_ids:\n clothes = self.getTrackClothes(track)\n flagIn = any(map(lambda v: v in clothes, self.parent.checked_clothes))\n if not self.parent.Fined or flagIn:\n self.add_chbox(f\"{str(track)} ({', '.join(clothes)})\", track)\n self.listWidget.addStretch()\n\n def getTrackClothes(self, track):\n track = str(track)\n ret = []\n if track in self.clothes:\n ret = self.clothes[track]\n return ret\n\n def add_chbox(self, text, track):\n wid = self.parent.addCHeckBoxTrack(text)\n self.ch_tracks[track] = wid\n\n def add_chbox_new(self, text, track):\n self.clearTracks(False)\n wid = self.parent.addCHeckBoxTrack(text)\n self.ch_tracks[track] = wid\n self.listWidget.addStretch()\n\n def clearTracks(self, all=True):\n for i in reversed(range(self.listWidget.count())):\n item = self.listWidget.itemAt(i)\n if item.spacerItem():\n self.listWidget.removeItem(item)\n elif self.listWidget.itemAt(i).widget() and all:\n self.listWidget.itemAt(i).widget().setParent(None)\n else:\n pass\n\n def getCheckedTracks(self):\n checked = []\n for id, ch_track in self.ch_tracks.items():\n if ch_track.isChecked():\n checked.append(id)\n return checked\n\n def checkAll(self):\n for _, ch_track in self.ch_tracks.items():\n ch_track.setCheckState(True)\n\n def uncheckAll(self):\n for _, ch_track in self.ch_tracks.items():\n ch_track.setCheckState(False)\n\n def applyTracks(self):\n self.Visualizer.setTracks(self.getCheckedTracks())\n\n def makeSplit(self, frame):\n checked = self.getCheckedTracks()\n if len(checked) != 1:\n self.parent.DisplayMsg(\"For split you should check one and only one track!\")\n return\n cur_track = self.np_tracks[self.np_tracks[:, 1] == checked[0]]\n prev = np.sum(cur_track[:, 0] <= frame)\n post = np.sum(cur_track[:, 0] > frame)\n if prev == 0:\n self.parent.DisplayMsg(\"First part of 
track is empty\")\n            return\n        if post == 0:\n            self.parent.DisplayMsg(\"Second part of track is empty\")\n            return\n\n        print(\"Frame to split: \", frame)\n        mask = (self.np_tracks[:, 0] > frame) & (self.np_tracks[:, 1] == checked[0])\n        self.np_tracks[mask, 1] = self.last_id + 1\n        self.last_id += 1\n        self.add_chbox_new(self.last_id, self.last_id)\n        self.Visualizer.setTracksInfo(self.np_tracks)\n\n    def makeConcat(self):\n        checked = self.getCheckedTracks()\n        if len(checked) != 2:\n            self.parent.DisplayMsg(\"For concat you should check two and only two tracks!\")\n            return\n\n        first_track = self.np_tracks[self.np_tracks[:, 1] == checked[0]]\n        second_track = self.np_tracks[self.np_tracks[:, 1] == checked[1]]\n        first_frames = set(np.unique(first_track[:, 0]))\n        second_frames = set(np.unique(second_track[:, 0]))\n\n        if len(first_frames & second_frames) > 4:\n            self.parent.DisplayMsg(\"Tracks can overlap in no more than 4 frames\")\n            return\n\n        mask = (self.np_tracks[:, 1] == checked[1])\n        self.np_tracks[mask, 1] = checked[0]\n        self.ch_tracks[checked[1]].setParent(None)\n        del self.ch_tracks[checked[1]]\n        self.Visualizer.setTracksInfo(self.np_tracks)\n\n    def deleteTracks(self):\n        checked = self.getCheckedTracks()\n        mask = np.array([True] * self.np_tracks.shape[0])\n\n        for track in checked:\n            # keep only the rows whose track id is not being deleted\n            mask = mask & (self.np_tracks[:, 1] != track)\n            self.ch_tracks[track].setParent(None)\n            del self.ch_tracks[track]\n\n        self.np_tracks = self.np_tracks[mask, :]\n        self.Visualizer.setTracksInfo(self.np_tracks)\n\n    def resetChanges(self):\n        self.np_tracks = np.copy(self.copy_np_tracks)\n        self.clearTracks()\n        self.displayTrackList()\n\n    def saveChanges(self):\n        tracks_file = open(self.tracks_path, \"w\")\n        for line in self.np_tracks:\n            print('%d,%d,%.2f,%.2f,%.2f,%.2f,1,-1,-1,-1' % (\n                line[0], line[1], line[2], line[3], line[4], line[5]),\n                  file=tracks_file)\n        tracks_file.close()\n        self.copy_np_tracks = np.copy(self.np_tracks)\n        self.parent.DisplayMsg(\"Saved!\")\n","sub_path":"code/TrackList.py","file_name":"TrackList.py","file_ext":"py","file_size_in_byte":5716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"231781935","text":"\"\"\"\nChallenge: itertools permutations\nLink to Challenge: https://www.hackerrank.com/challenges/itertools-permutations/problem\n\nAuthor: Kaustubh M. Harapanahalli\n\"\"\"\n\nfrom itertools import permutations\n\nstring, num = input().split()\n\noutputs = list(permutations(string, int(num)))\noutputs.sort()\n\nfor output in outputs:\n    print(''.join(output))\n","sub_path":"Python/ch27_itertools_permutations.py","file_name":"ch27_itertools_permutations.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"462661520","text":"'''\nGiven an array containing n distinct numbers taken from 0, 1, 2, ..., n, find \nthe one that is missing from the array.\n\nExample 1:\n\nInput: [3,0,1]\nOutput: 2\nExample 2:\n\nInput: [9,6,4,2,3,5,7,0,1]\nOutput: 8\nNote:\nYour algorithm should run in linear runtime complexity. 
Could you implement it \nusing only constant extra space complexity?\n'''\n\ndef missing_number(nums):\n sumx = 0 \n for number in nums:\n sumx += number\n\n total = (len(nums)*(len(nums)+1)) // 2\n return total-sumx\n\n","sub_path":"algorithms/math/missing_number.py","file_name":"missing_number.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"382344726","text":"from __future__ import unicode_literals, absolute_import\n\nimport os\n\nfrom ..stpipe import Pipeline\nfrom .. import datamodels\n\nfrom ..resample import resample_step\nfrom ..skymatch import skymatch_step\nfrom ..outlier_detection import outlier_detection_step\nfrom ..source_catalog import source_catalog_step\nfrom ..tweakreg_catalog import tweakreg_catalog_step\nfrom ..tweakreg import tweakreg_step\n\n__version__ = \"0.7.0\"\n\n\nclass Image3Pipeline(Pipeline):\n \"\"\"\n Image3Pipeline: Applies level 3 processing to imaging-mode data from\n any JWST instrument.\n\n Included steps are:\n tweakreg_catalog\n tweakreg\n skymatch\n outlier_detection\n resample\n source_catalog\n \"\"\"\n\n spec = \"\"\"\n suffix = string(default='i2d')\n \"\"\"\n\n # Define alias to steps\n step_defs = {'tweakreg_catalog': tweakreg_catalog_step.TweakregCatalogStep,\n 'tweakreg': tweakreg_step.TweakRegStep,\n 'skymatch': skymatch_step.SkyMatchStep,\n 'outlier_detection': outlier_detection_step.OutlierDetectionStep,\n 'resample': resample_step.ResampleStep,\n 'source_catalog': source_catalog_step.SourceCatalogStep\n }\n\n def process(self, input):\n \"\"\"\n Run the Image3Pipeline\n\n Parameters\n ----------\n input: Level3 Association, or ModelContainer\n The exposures to process\n \"\"\"\n\n self.log.info('Starting calwebb_image3 ...')\n\n input_models = datamodels.open(input)\n\n # Check if input is multiple exposures, as required by some steps\n is_container = isinstance(input_models, datamodels.ModelContainer)\n if is_container and len(input_models.group_names) > 1:\n\n self.log.info(\"Generating source catalogs for alignment...\")\n input_models = self.tweakreg_catalog(input_models)\n\n self.log.info(\"Aligning input images...\")\n input_models = self.tweakreg(input_models)\n\n # Clean up tweakreg catalogs which no are no longer needed\n for model in input_models:\n try:\n catalog_name = model.meta.tweakreg_catalog.filename\n os.remove(catalog_name)\n except:\n pass\n\n self.log.info(\"Matching sky values across all input images...\")\n input_models = self.skymatch(input_models)\n\n self.log.info(\"Performing outlier detection on input images...\")\n input_models = self.outlier_detection(input_models)\n\n self.log.info(\"Writing Level 2c images with updated DQ arrays...\")\n suffix_2c = 'cal-{}'.format(input_models.meta.asn_table.asn_id)\n for model in input_models:\n self.save_model(model, suffix=suffix_2c)\n\n self.log.info(\"Resampling images to final output...\")\n output = self.resample(input_models)\n\n product = input_models.meta.asn_table.products[0].name + '.fits'\n output.meta.filename = product\n self.save_model(output, suffix=self.suffix)\n self.log.info('Saved resampled image to %s', output.meta.filename)\n\n self.log.info(\"Creating source catalog...\")\n out_catalog = self.source_catalog(output)\n # NOTE: source_catalog step writes out the catalog in .ecsv format\n # In the future it would be nice if it was returned to the pipeline,\n # and then written here. 
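# [Editor's sketch] The missing-number snippet above uses the Gauss sum
# n*(n+1)/2; an equally constant-space alternative XORs indices against
# values, which also avoids large intermediate sums in fixed-width languages:
def missing_number_xor(nums):
    acc = len(nums)
    for i, v in enumerate(nums):
        acc ^= i ^ v
    return acc

assert missing_number_xor([3, 0, 1]) == 2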
A datamodel for .ecsv might be required.\n\n return\n","sub_path":"jwst/pipeline/calwebb_image3.py","file_name":"calwebb_image3.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"84986745","text":"import numpy as np\nimport itertools\n\nEPS = 1e-10\n\n\nclass LineSeg:\n\n def __str__(self):\n return f\"{self.x0},{self.y0} => {self.x1},{self.y1}\"\n\n def __repr__(self):\n return f\"{self.x0},{self.y0} => {self.x1},{self.y1}\"\n\n def __init__(self, line):\n if isinstance(line, str):\n pt0, pt1 = line.split(' -> ')\n self.x0, self.y0 = list(map(int, pt0.split(',')))\n self.x1, self.y1 = list(map(int, pt1.split(',')))\n else:\n self.x0, self.y0, self.x1, self.y1 = line\n self.x_min, self.x_max = sorted([self.x0, self.x1])\n self.y_min, self.y_max = sorted([self.y0, self.y1])\n self.diag = False if ((self.x0 == self.x1) or (self.y0 == self.y1)) else True\n try:\n self.angle = np.arctan((self.y1-self.y0)/(self.x1-self.x0))\n except ZeroDivisionError:\n self.angle = np.pi/2\n self.pts = None\n\n def intersect_v1(self, other):\n \"\"\"\n Determine intersection analytically, return a LineSeg representing it.\n This is only built to work for Part 1, and will not handle diagonal line segments.\n \"\"\"\n if (\n self.x_max < other.x_min or\n self.x_min > other.x_max or\n self.y_max < other.y_min or\n self.y_min > other.y_max or\n other.y_max < self.y_min or\n other.y_min > self.y_max or\n other.x_max < self.x_min or\n other.x_min > self.x_max\n ):\n return None\n if np.abs(self.angle - other.angle) < EPS:\n if self.x0 == self.x1:\n # both vertical\n if self.x0 != other.x0:\n return None\n return LineSeg([self.x0, max(self.y_min, other.y_min), self.x1, min(self.y_max, other.y_max)])\n elif self.y0 == self.y1:\n # both horizontal\n if self.y0 != other.y0:\n return None\n return LineSeg([max(self.x_min, other.x_min), self.y0, min(self.x_max, other.x_max), self.y1])\n else:\n if self.x0 == self.x1:\n # self is vertical, other is horizontal\n return LineSeg([self.x0, other.y0, self.x0, other.y0])\n elif self.y0 == self.y1:\n # self is horizontal, other is vertical\n return LineSeg([other.x0, self.y0, other.x0, self.y0])\n\n def intersect_v2(self, other):\n \"\"\"\n Determine intersection as the intersect of two sets of points\n \"\"\"\n return self.points() & other.points()\n\n def points(self):\n if self.pts is None:\n if (\n self.x0 == self.x1 or\n self.y0 == self.y1\n ):\n self.pts = set(itertools.product(range(self.x_min, self.x_max+1), range(self.y_min, self.y_max+1)))\n else:\n x_step = 1 if self.x1 > self.x0 else -1\n y_step = 1 if self.y1 > self.y0 else -1\n self.pts = set(zip(range(self.x0, self.x1 + x_step, x_step),\n range(self.y0, self.y1 + y_step, y_step)))\n return self.pts\n\n\nclass AdventOfCode:\n\n def __init__(self, filename):\n with open(filename) as f:\n self.input = f.read().splitlines()\n self.lines = [LineSeg(line) for line in self.input]\n\n def part1(self):\n straights = list(filter(lambda x: not x.diag, self.lines))\n pts = set()\n for i, line in enumerate(straights):\n for j in range(len(straights)-i-1):\n intersect = line.intersect_v1(straights[i+j+1])\n if intersect is not None:\n pts |= intersect.points()\n return len(pts)\n\n def part2(self):\n pts = set()\n for i, line in enumerate(self.lines):\n for j in range(len(self.lines)-i-1):\n pts |= line.intersect_v2(self.lines[i+j+1])\n return 
len(pts)\n","sub_path":"py/2021/05.py","file_name":"05.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"228708810","text":"import datetime\nfrom django import forms\nfrom django.forms import models\nfrom bootstrap_datepicker_plus import DatePickerInput\n \nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom socat.models import Survey\nfrom socat.models import Questionnaire\nfrom socat.models import Question\nfrom socat.models import Item\nfrom socat.models import Unit\n\nclass SurveyCreateForm(forms.ModelForm):\n class Meta:\n model = Survey\n fields = [\n 'questionnaire',\n 'unit',\n 'survey_date',\n 'first_name',\n 'last_name',\n 'rank',\n 'position',\n 'reporting',\n 'prior_aor',\n 'prior_engagement',\n 'mission_start',\n 'mission_end',\n 'mission_type',\n 'mission_objective',\n ]\n\n def __init__(self, *args, **kwargs):\n super(SurveyCreateForm, self).__init__(*args, *kwargs)\n\n BOOLEAN_CHOICES = ((True, 'Yes'), (False, 'No'))\n\n self.fields['questionnaire'] = models.ModelChoiceField(\n label='Select Questionnaire:',\n queryset=Questionnaire.objects.all(),\n widget=forms.RadioSelect(),\n required=True,\n empty_label=None,\n initial=None,\n )\n self.fields['unit'] = models.ModelChoiceField(\n label='Select Partner Nation Unit:',\n queryset=Unit.objects.all(),\n widget=forms.Select(),\n required=True,\n empty_label=None,\n initial=None,\n )\n\n self.fields['survey_date'] = forms.DateTimeField(\n label='Survey Date:',\n help_text='Survey creation date',\n widget=DatePickerInput(),\n required=True,\n initial=None,\n )\n self.fields['last_name'] = forms.CharField(\n label='Last Name:',\n help_text='Last name of person creating survey',\n widget=forms.TextInput(),\n required=True,\n initial=None\n )\n self.fields['first_name'] = forms.CharField(\n label='First Name:',\n help_text='First name of person creating survey',\n widget=forms.TextInput(),\n required=True,\n initial=None\n )\n self.fields['rank'] = forms.CharField(\n label='Rank:',\n help_text='Rank of person creating survey',\n widget=forms.TextInput(),\n required=True,\n initial=None\n )\n self.fields['position'] = forms.CharField(\n label='Position:',\n help_text='Position of person creating survey',\n widget=forms.TextInput(),\n required=True,\n initial=None\n )\n self.fields['reporting'] = forms.CharField(\n label='Reporting Organization/Unit:',\n widget=forms.TextInput(),\n required=True,\n initial=None\n )\n self.fields['mission_start'] = forms.DateTimeField(\n label='Mission Start Date:',\n widget=DatePickerInput(),\n required=True,\n initial=None,\n )\n self.fields['mission_end'] = forms.DateTimeField(\n label='Mission End Date:',\n widget=DatePickerInput(),\n required=True,\n initial=None,\n )\n self.fields['mission_type'] = forms.CharField(\n label='Mission Type:',\n widget=forms.TextInput(),\n required=True,\n initial=None\n )\n self.fields['mission_objective'] = forms.CharField(\n label='Mission Objective:',\n widget=forms.Textarea(attrs={\n 'class': 'narrative',\n 'rows':3,\n }),\n required=False,\n initial=None\n )\n\n self.fields['prior_aor'] = forms.IntegerField(\n label='Prior AOR (years):',\n widget=forms.NumberInput(),\n required=True,\n initial=None\n )\n self.fields['prior_engagement'] = forms.ChoiceField(\n choices = BOOLEAN_CHOICES,\n label='Prior Engagement:',\n widget=forms.RadioSelect(),\n required=True,\n initial=None\n )\n\n def clean(self):\n cleaned_data = super(SurveyCreateForm, 
self).clean()\n return cleaned_data\n\n def save(self, commit=True):\n survey = super(SurveyCreateForm, self).save(commit=False)\n # Questionnaire_PartnerForceCountry_PartnerForceAbbrev_SurveyReportingUnit _SurveyCreateDate)\n now = datetime.datetime.now()\n today = now.strftime('%Y-%m-%d')\n survey.name='{}_{}_{}_{}_{}'.format(\n survey.questionnaire.questionnaire,\n survey.unit.country,\n survey.unit.unit_abbrev,\n survey.reporting,\n today\n )\n survey.save()\n return survey.id\n\nclass SurveyUpdateForm(forms.ModelForm):\n class Meta:\n model = Survey\n fields = [\n 'state',\n 'name',\n 'last_name',\n 'first_name',\n 'rank',\n 'position',\n 'reporting',\n 'prior_aor',\n 'prior_engagement',\n 'mission_start',\n 'mission_end',\n 'mission_type',\n 'mission_objective',\n ]\n\n def __init__(self, *args, **kwargs):\n self.survey = kwargs.pop('survey')\n super(SurveyUpdateForm, self).__init__(*args, *kwargs)\n BOOLEAN_CHOICES = ((True, 'Yes'), (False, 'No'))\n self.fields['state'] = forms.ChoiceField(\n choices = Survey.STATE_CHOICES,\n label='State:',\n widget=forms.RadioSelect(),\n required=True,\n initial=self.survey.state,\n )\n self.fields['name'] = forms.CharField(\n label='Survey Name:',\n widget=forms.TextInput(),\n required=True,\n initial=self.survey.name\n )\n self.fields['last_name'] = forms.CharField(\n label='Last Name:',\n widget=forms.TextInput(),\n required=False,\n initial=self.survey.last_name\n )\n self.fields['first_name'] = forms.CharField(\n label='First Name:',\n widget=forms.TextInput(),\n required=False,\n initial=self.survey.first_name\n )\n self.fields['rank'] = forms.CharField(\n label='Rank:',\n widget=forms.TextInput(),\n required=False,\n initial=self.survey.rank\n )\n self.fields['position'] = forms.CharField(\n label='Position:',\n widget=forms.TextInput(),\n required=False,\n initial=self.survey.position\n )\n self.fields['reporting'] = forms.CharField(\n label='Reporting Organization/Unit:',\n widget=forms.TextInput(),\n required=False,\n initial=self.survey.reporting\n )\n self.fields['prior_aor'] = forms.IntegerField(\n label='Prior AOR (years):',\n widget=forms.NumberInput(),\n required=False,\n initial=self.survey.prior_aor\n )\n self.fields['prior_engagement'] = forms.ChoiceField(\n choices = BOOLEAN_CHOICES,\n label='Prior Engagement:',\n widget=forms.RadioSelect(),\n required=False,\n initial=self.survey.prior_engagement\n )\n self.fields['mission_start'] = forms.DateTimeField(\n label='Mission Start Date:',\n widget=DatePickerInput(),\n required=False,\n initial=self.survey.mission_start,\n )\n self.fields['mission_end'] = forms.DateTimeField(\n label='Mission End Date:',\n widget=DatePickerInput(),\n required=False,\n initial=self.survey.mission_end,\n )\n self.fields['mission_type'] = forms.CharField(\n label='Mission Type:',\n widget=forms.TextInput(),\n required=False,\n initial=self.survey.mission_type,\n )\n self.fields['mission_objective'] = forms.CharField(\n label='Mission Objective:',\n widget=forms.Textarea(attrs={\n 'class': 'narrative',\n 'rows':3,\n }),\n required=False,\n initial=self.survey.mission_objective,\n )\n\n def clean(self):\n cleaned_data = super(SurveyUpdateForm, self).clean()\n return cleaned_data\n\n def save(self, commit=True):\n # no magic at this point i am over the fm\n changes = super(SurveyUpdateForm, self).save(commit=False)\n self.survey.state=changes.state\n self.survey.name=changes.name\n self.survey.last_name=changes.last_name\n self.survey.first_name=changes.first_name\n self.survey.rank=changes.rank\n 
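# [Editor's sketch] Both forms in this module lean on the ModelForm
# save(commit=False) idiom: build an unsaved instance, fill derived fields,
# then write once. A stripped-down version of the same pattern (Survey and its
# reporting/rank fields are taken from this file; the derived name format is
# illustrative):
class SurveySketchForm(forms.ModelForm):
    class Meta:
        model = Survey
        fields = ['reporting', 'rank']

    def save(self, commit=True):
        instance = super().save(commit=False)  # build, but do not hit the DB yet
        instance.name = '{}_{}'.format(instance.reporting, instance.rank)
        if commit:
            instance.save()                    # single write with all changes
        return instance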
self.survey.position=changes.position\n        self.survey.save()\n","sub_path":"django/site/socat/forms/survey.py","file_name":"survey.py","file_ext":"py","file_size_in_byte":9840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"370565643","text":"#!/usr/bin/env python\n# coding: utf-8\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport gc\n\n# I will run this on a per image basis. This means that I will load all pictures of one 3d image and store them in one array. I will write this out.\n\n# Create the file-names per image. The variation loop is innermost, so each\n# clean slice name is repeated once per reconstruction variant.\ndef create_filenames(pic,\n                     var_per_image = 3,\n                     x_start = 15,\n                     x_end = 300,\n                     y_start = 50,\n                     y_end = 280,\n                     z_start = 40,\n                     z_end = 220):\n\n    # Initialize lists #\n    names_y = list()\n    names_x = list()\n\n    for x in range(x_start, x_end):\n        for variation in range(0,var_per_image):\n            names_y.append(\"P\" + str(pic) + \"_slicex\" + str(x) + \".png\")\n            names_x.append(\"P\" + str(pic) + \"_reco_slicex\" + str(x) + \"-000\" + str(variation) + \".png\")\n    for y in range(y_start, y_end):\n        for variation in range(0,var_per_image):\n            names_y.append(\"P\" + str(pic) + \"_slicey\" + str(y) + \".png\")\n            names_x.append(\"P\" + str(pic) + \"_reco_slicey\" + str(y) + \"-000\" + str(variation) + \".png\")\n    for z in range(z_start, z_end):\n        for variation in range(0,var_per_image):\n            names_y.append(\"P\" + str(pic) + \"_slicez\" + str(z) + \".png\")\n            names_x.append(\"P\" + str(pic) + \"_reco_slicez\" + str(z) + \"-000\" + str(variation) + \".png\")\n    return [names_x, names_y]\n\n# Load in images\n\n# For the following program to run ...\n# .. 
it has to be in a folder where there are four subfolders:\n# X: containing the spoiled images\n# Y: containing the good images\n# out: empty directory for the output\n# out/Report: directory for the lists of missing images\n\n# -------------- #\n# Specifications #\n# -------------- #\nnum_pic = 21\npath_x = 'Images_Y/'\npath_y = 'Images_X/'\nstart_slice_x = 32\nstart_slice_y = 32\nstart_slice_z_1 = 32\nstart_slice_z_2 = 32\n\nend_slice_x = 320 - start_slice_x\nend_slice_y = 320 - start_slice_y\nend_slice_z_1 = 320 - start_slice_z_1\nend_slice_z_2 = 320 - start_slice_z_2\n\n# -------------- #\n# Loop #\n# -------------- #\n# create the matrices\ntotal_images = 0\nfor pic in range(1,num_pic):\n    # Calculate the total number of images that have to be read in.\n    file_names = create_filenames(pic = pic)\n    total_images += len(file_names[1])\n\nshape = (total_images,) + (256, 256, 1)\nX = np.empty(shape,dtype = 'float32')\nY = np.empty(shape,dtype = 'float32')\n\niter_x = 0\niter_y = 0\nfor pic in range(1,num_pic):\n    # Create the file names\n    file_names = create_filenames(pic = pic)\n\n    # iterate over the file names and save them to the respective array;\n    # if a file is missing, write its name to a report and go on silently\n\n    ## X ##\n    # create lists for missing images\n    X_missing = list()\n    for x_file in file_names[0]:\n        file = cv2.imread(\"/home/cloud/Create_Data/\" + path_x + x_file)\n        if (file is None):\n            print(\"Did not find \" + path_x + x_file)\n            X_missing.append(x_file)\n        else:\n            if pic <=9:\n                slicetype_x = x_file[13]\n                slicetype_y_z = x_file[13]\n            else:\n                slicetype_x = x_file[14]\n                slicetype_y_z = x_file[14]\n            if (slicetype_x == 'x'):\n                if (file.shape == (320,256,3)):\n                    X[iter_x,:,:,:] = file[start_slice_x:end_slice_x,:,0:1] / 255\n                elif (file.shape == (256, 320, 3)):\n                    X[iter_x,:,:,:] = file[:,start_slice_x:end_slice_x,0:1] / 255\n            elif (slicetype_y_z == 'y'):\n                if (file.shape == (320,256,3)):\n                    X[iter_x,:,:,:] = file[start_slice_y:end_slice_y,:,0:1] / 255\n                elif (file.shape == (256, 320, 3)):\n                    X[iter_x,:,:,:] = file[:,start_slice_y:end_slice_y,0:1] / 255\n            elif (slicetype_y_z == 'z'):\n                X[iter_x,:,:,:] = file[start_slice_z_1:end_slice_z_1, start_slice_z_2:end_slice_z_2,0:1] / 255\n            else:\n                print(\"STRANGE! pic:\",pic, \"file:\", x_file, \" slicetype was \", slicetype_y_z)\n                X_missing.append(\"STRANGE!!!'\" + x_file + \"STRANGE!!!\")\n            iter_x += 1\n\n    # save the missing list\n    with open('/home/cloud/Create_Data/' + 'out/Report/' + 'P' + str(pic) + \"_missingX\", 'w') as f:\n        for item in X_missing:\n            f.write(\"%s\\n\" % item)\n\n\n    ## Y ##\n    Y_missing = list()\n    for y_file in file_names[1]:\n        file = cv2.imread(\"/home/cloud/Create_Data/\" + path_y + y_file)\n        if (file is None):\n            Y_missing.append(y_file)\n        else:\n            if pic <=9:\n                slicetype_x = y_file[8]\n                slicetype_y_z = y_file[8]\n            else:\n                slicetype_x = y_file[9]\n                slicetype_y_z = y_file[9]\n            if (slicetype_x == 'x'):\n                if (file.shape == (320,256,3)):\n                    Y[iter_y,:,:,:] = file[start_slice_x:end_slice_x,:,0:1] / 255\n                elif (file.shape == (256, 320, 3)):\n                    Y[iter_y,:,:,:] = file[:,start_slice_x:end_slice_x,0:1] / 255\n            elif (slicetype_y_z == 'y'):\n                if (file.shape == (320,256,3)):\n                    Y[iter_y,:,:,:] = file[start_slice_y:end_slice_y,:,0:1] / 255\n                elif (file.shape == (256, 320, 3)):\n                    Y[iter_y,:,:,:] = file[:,start_slice_y:end_slice_y,0:1] / 255\n            elif (slicetype_y_z == 'z'):\n                Y[iter_y,:,:,:] = file[start_slice_z_1:end_slice_z_1, start_slice_z_2:end_slice_z_2,0:1] / 255\n            else:\n                print(\"STRANGE! 
pic:\",pic, \"file:\", y_file)\n Y_missing.append(\"STRANGE!!!'\" + y_file + \"STRANGE!!!\")\n iter_y += 1\n \n \n # Save the lists of missing images\n with open('/home/cloud/Create_Data/' + 'out/Report/' + 'P' + str(pic) + \"_missingY\", 'w') as f:\n for item in Y_missing:\n f.write(\"%s\\n\" % item)\n\n# Save the image\nY_name = '/home/cloud/Create_Data/' + \"out/\" + \"Y.npy\"\nnp.save(Y_name, Y) \n\n# save the image\nX_name = '/home/cloud/Create_Data/' + \"out/\" + \"X.npy\"\nnp.save(X_name, X)","sub_path":"Create Data/Read_in_data_rad_15.py","file_name":"Read_in_data_rad_15.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"91371018","text":"from utils.config import cfg\n\nfrom torch.utils.data import Dataset\nimport torchvision.transforms as T\n\nimport os\nimport sys\n\nimport pickle\nimport numpy as np\nimport PIL.Image as Image\n\n\nclass CIFAR10_DATASET(Dataset):\n \"\"\"`CIFAR10 `_ Dataset.\n\n Args:\n root (string): Root directory of dataset where directory\n ``cifar-10-batches-py`` exists or will be saved to if download is set to True.\n train (bool, optional): If True, creates dataset from training set, otherwise\n creates from test set.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n dataset_folder = cfg.CIFAR10_DIR\n url = \"https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz\"\n filename = \"cifar-10-python.tar.gz\"\n tgz_md5 = 'c58f30108f718f92721af3b95e74349a'\n train_list = [\n ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],\n ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],\n ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],\n ['data_batch_4', '634d18415352ddfa80567beed471001a'],\n ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],\n ]\n\n test_list = [\n ['test_batch', '40351d587109b95175f43aff81a1287e'],\n ]\n\n def __init__(self, train=True, transform=None):\n self.transform = transform\n self.train = train # training set or test set\n\n\n # now load the picked numpy arrays\n if self.train:\n self.train_data = []\n self.train_labels = []\n for fentry in self.train_list:\n f = fentry[0]\n file = os.path.join(self.dataset_folder, f)\n fo = open(file, 'rb')\n if sys.version_info[0] == 2:\n entry = pickle.load(fo)\n else:\n entry = pickle.load(fo, encoding='latin1')\n self.train_data.append(entry['data'])\n if 'labels' in entry: # TODO: CHECK\n self.train_labels += entry['labels']\n else:\n self.train_labels += entry['fine_labels']\n fo.close()\n\n self.train_data = np.concatenate(self.train_data)\n self.train_data = self.train_data.reshape((50000, 3, 32, 32))\n self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC\n else:\n f = self.test_list[0][0]\n file = os.path.join(self.dataset_folder, f)\n fo = open(file, 'rb')\n if sys.version_info[0] == 2:\n entry = pickle.load(fo)\n else:\n entry = pickle.load(fo, encoding='latin1')\n self.test_data = entry['data']\n if 'labels' in entry:\n self.test_labels = entry['labels']\n else:\n self.test_labels = entry['fine_labels']\n fo.close()\n self.test_data = self.test_data.reshape((10000, 3, 32, 
32))\n        self.test_data = self.test_data.transpose((0, 2, 3, 1))  # convert to HWC\n\n    def __getitem__(self, index):\n        \"\"\"\n        Args:\n            index (int): Index\n\n        Returns:\n            tuple: (image, target) where target is index of the target class.\n        \"\"\"\n        if self.train:\n            img, target = self.train_data[index], self.train_labels[index]\n        else:\n            img, target = self.test_data[index], self.test_labels[index]\n\n        # doing this so that it is consistent with all other datasets\n        # to return a PIL Image\n        img = Image.fromarray(img)\n\n        if self.transform is not None:\n            img = self.transform(img)\n\n        return img, target\n\n    def __len__(self):\n        if self.train:\n            return len(self.train_data)\n        else:\n            return len(self.test_data)\n\n\ndef getDefaultTransform():\n    \"\"\"\n    Algorithm:\n        1. give transform for train and test\n\n    \"\"\"\n\n    train_transform = T.Compose([\n        T.RandomResizedCrop(size=32),\n        T.RandomHorizontalFlip(),\n        T.ToTensor(),\n        T.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010))\n    ])\n\n    test_transform = T.Compose([\n        T.ToTensor(),\n        T.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010))\n    ])\n\n    return train_transform, test_transform\n\n\ndef getDataset():\n    \"\"\"\n    train and test dataset with default transform\n\n    :return:\n    \"\"\"\n    train_t, test_t = getDefaultTransform()\n\n    trainDataset = CIFAR10_DATASET(train=True, transform=train_t)\n    testDataset = CIFAR10_DATASET(train=False, transform=test_t)\n\n    return trainDataset, testDataset\n\n\nif __name__ == \"__main__\":\n    \"\"\"\n    \"\"\"\n    import time\n\n    trainDataset, testDataset = getDataset()\n\n    tic = time.time()\n    for i in range(10000):\n        data = trainDataset[i]\n\n    print((time.time()-tic)/10000)\n","sub_path":"dataset/Cifar10Dataset.py","file_name":"Cifar10Dataset.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"457124324","text":"import torch\nimport torch.nn as nn\nfrom torch.nn.modules.upsampling import Upsample\n\n'''\n    Example model construction in pytorch\n'''\nclass example_resblock(nn.Module):\n    def __init__(self, bias=True, act=nn.ReLU(True)):\n        super(example_resblock, self).__init__()\n        modules = []\n        modules.append(nn.Conv2d(16, 16, 3, padding=1, bias=bias))\n        modules.append(act)\n        modules.append(nn.Conv2d(16, 16, 3, padding=1, bias=bias))\n        self.body = nn.Sequential(*modules)\n\n    def forward(self, x):\n        out = self.body(x)\n        out += x\n        return out\n\nclass resblock(nn.Module):\n    def __init__(self, nFeat, kernel_size=3, bias=True, act=nn.ReLU(True)):\n        super(resblock, self).__init__()\n        modules = []\n        modules.append(nn.Conv2d(nFeat, nFeat, kernel_size=kernel_size, padding=kernel_size // 2, bias=bias))\n        modules.append(act)\n        modules.append(nn.Conv2d(nFeat, nFeat, kernel_size=kernel_size, padding=kernel_size // 2, bias=bias))\n        self.body = nn.Sequential(*modules)\n\n    def forward(self, x):\n        out = self.body(x)\n        out += x\n        return out\n\nclass upsampler(nn.Module):\n    def __init__(self, scale=2, nFeat=16, act=nn.ReLU(True)):\n        super(upsampler, self).__init__()\n        #===== write your model definition here =====#\n        modules = []\n        modules.append(nn.Conv2d(nFeat, nFeat*4, kernel_size=3, padding=3 // 2, bias=True))\n        modules.append(nn.PixelShuffle(scale))\n        modules.append(act)\n        self.body = nn.Sequential(*modules)\n\n    def forward(self, x):\n        #===== write your dataflow here =====#\n        out = self.body(x)\n        return out\n\nclass ZebraSRNet(nn.Module):\n    def __init__(self, nFeat=64, 
kernel_size=3, nResBlock=8, imgChannel=3):\n        super(ZebraSRNet, self).__init__()\n        #===== write your model definition here using 'resblock' and 'upsampler' as the building blocks =====#\n        modules1 = []\n        # feature extraction: image channels -> nFeat feature maps\n        modules1.append(nn.Conv2d(imgChannel, nFeat, kernel_size, padding= kernel_size//2, bias=True))\n        self.body1 = nn.Sequential(*modules1)\n\n        modules2 = []\n        for i in range(0, nResBlock, 1):\n            modules2.append(resblock(nFeat, kernel_size, True, nn.ReLU(True)))\n        self.body2 = nn.Sequential(*modules2)\n\n        modules3 = []\n        modules3.append(upsampler(2, nFeat, act=nn.ReLU(True)))\n        modules3.append(upsampler(2, nFeat, act=nn.ReLU(True)))\n        # reconstruction: nFeat feature maps -> image channels\n        modules3.append(nn.Conv2d(nFeat, imgChannel, kernel_size, padding= kernel_size//2, bias=True))\n        self.body3 = nn.Sequential(*modules3)\n\n    def forward(self, x):\n        #===== write your dataflow here =====#\n        out1 = self.body1(x)\n\n        out2 = self.body2(out1)\n        out2 += out1\n\n        out3 = self.body3(out2)\n        return out3","sub_path":"Free_study_6/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"98551214","text":"# If n is a prime number, then for every a, 1 < a < n-1\n# a^(n-1) % n = 1.\n\n# If given a prime, it always returns true, otherwise it is\n# probabilistic (see Carmichael numbers). Accuracy can be increased\n# by performing multiple iterations.\n\nimport math\nimport random\n\nn = int(input(\"Enter a number for primality check: \"))\nk = int(input(\"Enter number of checks, greater means more accuracy: \"))\n\nb = True\ni = 0\n\nwhile (i < k):\n\t# Pick a random witness a with 1 < a < n-1.\n\ta = random.randint(2, n-2)\n\t# pow(a, n-1, n) does modular exponentiation, keeping the numbers small.\n\tif math.gcd(a, n) > 1 or pow(a, n-1, n) != 1:\n\t\tb = False\n\t\tprint(\"Composite.\")\n\ti += 1\nif b:\n\tprint(\"Probably prime.\")\n","sub_path":"code/fermat.py","file_name":"fermat.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"435986489","text":"import pandas as pd\nimport os \nfrom functools import reduce\n\nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\n\n# File 1/3: Population by community\ndf_pop = pd.read_csv(THIS_FOLDER + \"/csv/\" + \"population_by_community.csv\", sep =\";\")\ndf_pop[['index','Region']] = df_pop['Comunidades y Ciudades Autónomas'].str.split(' ', 1, expand=True) # Get rid of index in region name\ndf_pop.drop(['Comunidades y Ciudades Autónomas', 'index', 'Tamaño de los municipios', 'Periodo'], axis=1, inplace=True) # Remove redundant columns\ndf_pop = df_pop.iloc[1:] # Remove total row\ndf_pop.rename(columns = {'Total': 'Population'}, inplace = True)\n#print(df_pop.head(10))\n\n# File 2/3: Age 65 by community\ndf_age = pd.read_csv(THIS_FOLDER + \"/csv/\" + \"age_65_by_community.csv\", sep =\";\")\ndf_age[['index','Region']] = df_age['Comunidades y Ciudades Autónomas'].str.split(' ', 1, expand=True) # Get rid of index in region name\ndf_age.drop(['Comunidades y Ciudades Autónomas', 'index', 'Edad', 'Periodo'], axis=1, inplace=True) # Remove redundant columns\ndf_age = df_age.iloc[1:] # Remove total row\ndf_age.rename(columns = {'Total': 'Age65'}, inplace = True)\n#print(df_age.head(10))\n\n# File 3/3: Income by community // Cleaned manually\ndf_inc = pd.read_csv(THIS_FOLDER + \"/csv/\" + \"income.csv\", sep =\";\")\ndf_inc.rename(columns = {'Total': 'Income'}, inplace = True)\n#print(df_inc.head(10))\n\ndata_frames = [df_pop, df_age, df_inc]\ndf_merged = reduce(lambda left,right: pd.merge(left,right,on=['Region']), data_frames)\n\n# tail -40 covid_data.csv | cut -f2 
-d ',' | sort -u # Bash command to get region 'id's in covid_data.csv \nregion_ids = ["andalucia", "aragon","asturias","baleares","canarias","cantabria","castilla-la_mancha","castilla_y_leon","cataluna","c_valenciana","extremadura","galicia","madrid","murcia","navarra","pais_vasco","la_rioja","ceuta","melilla"]\ndf_merged['id'] = region_ids \n#print(df_merged)\n\ndf_merged.to_csv('metadata.csv', mode='w', header=True, index=False, sep = \";\")\n\n\n","sub_path":"merge_metadata.py","file_name":"merge_metadata.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"268297153","text":"__author__ = 'Victor'\nimport random\n\nfrom Optimization.queens import QueenState\n\n# A state in an N-queens puzzle environment.\nclass QueenStateIndividual(QueenState):\n    # Return the number of non-conflicting queen pairs.\n    def fitness(self):\n        size = len(self.locations)\n        return size*(size-1)/2 - self.conflicts()\n\n    # Return a new individual that is a combination of self and other.\n    def crossover(self, other):\n        # Single-point crossover: a prefix from self, the rest from other.\n        point = random.randrange(1, len(self.locations))\n        child = QueenStateIndividual(len(self.locations))\n        child.locations = self.locations[:point] + other.locations[point:]\n        return child\n\n    # Give each pair of locations a small probability of getting swapped.\n    def mutate(self):\n        for i in range(len(self.locations)):\n            for j in range(len(self.locations)):\n                if random.random() <= 0.01:\n                    self.locations[i], self.locations[j] = self.locations[j], self.locations[i]\n\n# A collection of states in a genetic algorithm.\nclass Population(object):\n    def __init__(self):\n        self.states = list()\n        self.total_fitness = 0\n\n    def add(self, state):\n        fitness = state.fitness()\n        self.total_fitness += fitness\n        self.states.append((fitness, state))\n\n    # Return the state with the highest fitness.\n    def best(self):\n        (fitness, state) = max(self.states, key=lambda pair: pair[0])\n        return state\n\n    # Roulette-wheel selection: pick a state with probability\n    # proportional to its fitness.\n    def select(self):\n        pick = random.uniform(0, self.total_fitness)\n        current = 0\n        for (fitness, state) in self.states:\n            current += fitness\n            if current >= pick:\n                return state\n        return self.states[-1][1]\n\n    # Return another population of the same size as this one.\n    # Use elitism: include the best state from this population.\n    def nextgen(self):\n        new_pop = Population()\n        new_pop.add(self.best())\n        for count in range(len(self.states) - 1):\n            offspring = self.select().crossover(self.select())\n            offspring.mutate()\n            new_pop.add(offspring)\n        return new_pop\n\n\nif __name__ == '__main__':\n    n = 8 # The puzzle size\n    pop = 100 # The population size\n\n    # Initial population of random states\n    population = Population()\n    for count in range(pop):\n        population.add(QueenStateIndividual(n))\n\n    best = population.best()\n    print(best.fitness(), \"after 0 generations\")\n\n    generation = 0\n    while best.fitness() < n*(n-1)/2:\n        generation += 1\n        population = population.nextgen()\n\n        if population.best().fitness() > best.fitness():\n            best = population.best()\n            print(best.fitness(), \"after\", generation, \"generations\")\n\n    best.display()\n","sub_path":"Queen State Genetic.py","file_name":"Queen State Genetic.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"510275301","text":"# do imports\nfrom django import forms\nfrom parsley.decorators import parsleyfy\nfrom django.forms.widgets import HiddenInput\nfrom django_countries import countries\nfrom 
mp3vidi.apps.mp3v_plans.models import BillingInfo, Order, PlanPricing, ShowMessage\nfrom datetime import date, datetime\nfrom calendar import monthrange\n\n\nclass CreditCardField(forms.IntegerField):\n\n def clean(self, value):\n if value and len(value) > 0 and (len(value) < 13 or len(value) > 16):\n raise forms.ValidationError(\"Please enter in a valid credit card number.\")\n # elif value and len(value) > 0 and self.get_cc_type(value) not in (\"Visa\", \"MasterCard\",\n # \"American Express\"):\n # raise forms.ValidationError(\"Please enter in a Visa, Master Card, or American Express credit card number.\")\n return super(CreditCardField, self).clean(value)\n\n\nclass CCExpWidget(forms.MultiWidget):\n def decompress(self, value):\n return [value.month, value.year] if value else [None, None]\n\n def format_output(self, rendered_widgets):\n html = u' / '.join(rendered_widgets)\n return u'%s' % html\n\n\nclass CCExpField(forms.MultiValueField):\n EXP_MONTH = [(x, x) for x in xrange(1, 13)]\n EXP_YEAR = [(x, x) for x in xrange(date.today().year,\n date.today().year + 15)]\n default_error_messages = {\n 'invalid_month': u'Enter a valid month.',\n 'invalid_year': u'Enter a valid year.',\n }\n\n def __init__(self, *args, **kwargs):\n errors = self.default_error_messages.copy()\n if 'error_messages' in kwargs:\n errors.update(kwargs['error_messages'])\n fields = (\n forms.ChoiceField(choices=self.EXP_MONTH,\n error_messages={'invalid': errors['invalid_month']}),\n forms.ChoiceField(choices=self.EXP_YEAR,\n error_messages={'invalid': errors['invalid_year']}),\n )\n super(CCExpField, self).__init__(fields, *args, **kwargs)\n self.widget = CCExpWidget(widgets=[fields[0].widget, fields[1].widget])\n\n def clean(self, value):\n exp = super(CCExpField, self).clean(value)\n if exp is not None and date.today() > exp:\n raise forms.ValidationError(\"The expiration date you entered is in the past.\")\n return exp\n\n def compress(self, data_list):\n if data_list:\n if data_list[1] in forms.fields.EMPTY_VALUES:\n error = self.error_messages['invalid_year']\n raise forms.ValidationError(error)\n if data_list[0] in forms.fields.EMPTY_VALUES:\n error = self.error_messages['invalid_month']\n raise forms.ValidationError(error)\n year = int(data_list[1])\n month = int(data_list[0])\n # find last day of the month\n day = monthrange(year, month)[1]\n return date(year, month, day)\n return None\n\n\n@parsleyfy\nclass OrderForm(forms.Form):\n plan_pricing = forms.ModelChoiceField(queryset=PlanPricing.objects.all(), widget=HiddenInput, required=True)\n\n\n@parsleyfy\nclass CreateOrderForm(forms.ModelForm):\n stripe_token = forms.CharField(widget=forms.HiddenInput(), required=True)\n purchased_credits = forms.IntegerField(required=False, label=\"Credits\", widget=forms.HiddenInput())\n number = CreditCardField(required=False, label=\"Card Number\", widget=forms.widgets.TextInput(attrs={'class': 'input-hg form-control', 'placeholder': 'Credit Card Number'}))\n holder = forms.CharField(required=False, label=\"Card Holder Name\", max_length=60, widget=forms.widgets.TextInput(attrs={'class': 'input-hg form-control', 'placeholder': 'Name on the Card'}))\n expiration = CCExpField(required=False, label=\"Expiration\")\n ccv_number = forms.IntegerField(required=False, label=\"CCV Number\", max_value=9999,\n widget=forms.TextInput(attrs={'size': '4','class': 'input-hg form-control', 'placeholder': 'Security Code (CVV/CCV)'}))\n\n def __init__(self, *args, **kwargs):\n self.payment_data = kwargs.pop('payment_data', None)\n 
super(CreateOrderForm, self).__init__(*args, **kwargs)\n\n    def clean(self):\n        cleaned = super(CreateOrderForm, self).clean()\n        if not self.errors:\n            result = self.process_payment()\n            if result and result[0] == 'Card declined':\n                raise forms.ValidationError('Your credit card was declined.')\n            elif result and result[0] == 'Processing error':\n                raise forms.ValidationError(\n                    'We encountered the following error while processing your credit card: '+result[1])\n        return cleaned\n\n    def process_payment(self):\n        if self.payment_data:\n            # don't process payment if payment_data wasn't set\n            datadict = self.cleaned_data\n            datadict.update(self.payment_data)\n\n            from virtualmerchant import VirtualMerchant\n            vmerchant = VirtualMerchant(datadict)\n\n            return vmerchant.process_virtualmerchant_payment()\n\n    class Meta:\n        model = Order\n        exclude = ('user',)\n        fields = []\n\n\n@parsleyfy\nclass BillingInfoForm(forms.ModelForm):\n    class Meta:\n        model = BillingInfo\n        exclude = ('user', 'tax_number', 'stripe_token', 'card_fingerprint', 'card_last_4', 'card_kind', 'date_purged', 'card_name', 'card_exp_month', 'card_exp_year')\n\n    def clean(self):\n        cleaned_data = super(BillingInfoForm, self).clean()\n        return cleaned_data\n\n\n@parsleyfy\nclass BillingInformationForm(forms.ModelForm):\n    class Meta:\n        model = BillingInfo\n        exclude = ('user', 'tax_number', 'stripe_token', 'card_fingerprint', 'card_last_4', 'card_kind', 'date_purged', 'card_name', 'card_exp_month', 'card_exp_year')\n\n    def __init__(self, *args, **kwargs):\n        super(BillingInformationForm, self).__init__(*args, **kwargs)\n        self.fields['name'].widget = forms.TextInput(attrs={\n            'placeholder': 'Full Name', 'class': 'input-hg form-control'})\n        self.fields['city'].widget = forms.TextInput(attrs={\n            'placeholder': 'City', 'class': 'input-hg form-control'})\n        self.fields['state'].widget = forms.TextInput(attrs={\n            'placeholder': 'State/province (if applicable)', 'class': 'input-hg form-control'})\n        self.fields['zipcode'].widget = forms.TextInput(attrs={\n            'placeholder': 'Postal code', 'class': 'input-hg form-control'})\n        self.fields['street'].widget = forms.TextInput(attrs={\n            'placeholder': 'Address', 'class': 'input-hg form-control'})\n        self.fields['country'].widget = forms.Select(choices=list(countries), attrs={\n            'class': 'selectpicker dropup', 'data-size': 'auto'})\n\n    def clean(self):\n        cleaned_data = super(BillingInformationForm, self).clean()\n        return cleaned_data\n\n\nclass BillingInfoWithoutShippingForm(BillingInfoForm):\n    class Meta:\n        model = BillingInfo\n        exclude = ('user', 'tax_number', 'stripe_token', 'shipping_name', 'shipping_street', 'shipping_zipcode', 'shipping_city', 'shipping_state', 'card_fingerprint', 'card_last_4', 'card_kind', 'date_purged', 'card_name', 'card_exp_month', 'card_exp_year')\n\n    def __init__(self, *args, **kwargs):\n        super(BillingInfoWithoutShippingForm, self).__init__(*args, **kwargs)\n        self.fields['name'].widget = forms.TextInput(attrs={\n            'placeholder': 'Full Name', 'class': 'input-hg form-control'})\n        self.fields['city'].widget = forms.TextInput(attrs={\n            'placeholder': 'City', 'class': 'input-hg form-control'})\n        self.fields['state'].widget = forms.TextInput(attrs={\n            'placeholder': 'State/province (if applicable)', 'class': 'input-hg form-control'})\n        self.fields['zipcode'].widget = forms.TextInput(attrs={\n            'placeholder': 'Postal code', 'class': 'input-hg form-control'})\n        self.fields['street'].widget = forms.TextInput(attrs={\n            'placeholder': 'Address', 'class': 'input-hg form-control'})\n        
self.fields['country'].widget = forms.Select(choices=list(countries), attrs={\n 'class': 'selectpicker dropup', 'data-size': 'auto'})\n\n def clean(self):\n cleaned_data = super(BillingInfoForm, self).clean()\n return cleaned_data\n\n\nclass BillingInfoWithoutShippingFormForOrder(BillingInfoForm):\n stripe_err = forms.CharField(widget=forms.HiddenInput(), required=False)\n\n class Meta:\n model = BillingInfo\n exclude = ('user', 'tax_number', 'stripe_token', 'shipping_name', 'shipping_street', 'shipping_zipcode', 'shipping_city', 'shipping_state', 'card_fingerprint', 'card_last_4', 'card_kind', 'date_purged', 'card_name', 'card_exp_month', 'card_exp_year')\n\n def clean(self):\n cleaned_data = super(BillingInfoForm, self).clean()\n return cleaned_data\n\nclass ShowMessageForm(forms.ModelForm):\n class Meta:\n model = ShowMessage\n exclude = ('user', )","sub_path":"apps/mp3v_plans/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":9137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"119127048","text":"\"\"\"Contains the functions for the CDS page.\n\nReturns CDS data.\nUpdates CDS data.\nQueries BLAST results.\n\nAttributes:\n response_object:\n The dictionary that is returned by the main functions.\n\"\"\"\nimport models\nfrom models import *\nimport helper\nimport pandas as pd\nimport re\nimport json\nfrom datetime import datetime\n\nresponse_object = {}\n\n# ------------------------------ MAIN FUNCTIONS ------------------------------\ndef annotate_cds(phage_id, request, cds_id, UPLOAD_FOLDER):\n \"\"\"Updates a CDS given the data and the ID.\n\n Updates the left, right, and function of a given CDS.\n If CDS does not exist returns an error message.\n Updates the CDS status.\n\n Args:\n request:\n A dictionary containing the new CDS data.\n cds_id:\n The ID of the CDS to be updated.\n\n Returns:\n A dictionary containing a pass or fail message.\n \"\"\"\n put_data = request.get_json()\n cds = Annotations.query.filter_by(phage_id=phage_id).filter_by(id=cds_id).first()\n if cds:\n if put_data.get('strand'):\n cds.strand = put_data.get('strand')\n cds.left = put_data.get('left')\n cds.right = put_data.get('right')\n cds.function = put_data.get('function')\n cds.notes = put_data.get('notes')\n else:\n cds.notes = put_data.get('notes')\n response_object['message'] = 'CDS updated!'\n else:\n response_object['message'] = 'CDS did not update.'\n coding_potential = {}\n genemark_gdata_file = helper.get_file_path(\"gdata\", UPLOAD_FOLDER)\n gdata_df = pd.read_csv(genemark_gdata_file, sep='\\t', skiprows=16)\n gdata_df.columns = ['Base', '1', '2', '3', '4', '5', '6']\n coding_potential['x_data'] = gdata_df[\"Base\"].to_list()\n coding_potential['y_data_1'] = gdata_df[\"1\"].to_list()\n coding_potential['y_data_2'] = gdata_df[\"2\"].to_list()\n coding_potential['y_data_3'] = gdata_df[\"3\"].to_list()\n coding_potential['y_data_4'] = gdata_df[\"4\"].to_list()\n coding_potential['y_data_5'] = gdata_df[\"5\"].to_list()\n coding_potential['y_data_6'] = gdata_df[\"6\"].to_list()\n \n if (cds.status == \"trnaDELETED\" or cds.status == \"tRNA\"):\n cds.status = put_data.get('status')\n cds.frame = put_data.get('frame')\n else:\n frame, status = helper.get_frame_and_status(cds.left, cds.right, cds.strand, coding_potential)\n cds.status = status\n cds.frame = frame\n db.session.commit()\n\n return response_object\n\ndef get_cds_data(phage_id, UPLOAD_FOLDER, cds_id):\n \"\"\"Queries and returns all of the data for a CDS given the ID.\n\n 
Gets left, right, function, and status for the CDS.\n Gets the right of the previous CDS and the left of the next CDS.\n Gets the blast results for the CDS.\n Gets the genemark coding potential data.\n Gets the next non-updated CDS ID.\n\n Args:\n UPLOAD_FOLDER:\n The folder containing all of the uploaded files.\n cds_id:\n The ID of the requested CDS.\n Returns:\n A dictionary containing the CDS data including the blast results and coding potential.\n \n \"\"\"\n num_begins = cds_id.rfind('_') + 1\n index = float(cds_id[num_begins:])\n index = int(index)\n prev_id = cds_id[:num_begins] + str(index - 1)\n next_id = cds_id[:num_begins] + str(index + 1)\n cds = Annotations.query.filter_by(phage_id=phage_id).filter_by(id=cds_id).first()\n prev_cds = Annotations.query.filter_by(phage_id=phage_id).filter_by(id=prev_id).first()\n next_cds = Annotations.query.filter_by(phage_id=phage_id).filter_by(id=next_id).first()\n if prev_cds is not None:\n response_object['prevCDS'] = prev_id\n response_object['prev_right'] = prev_cds.right\n else:\n response_object['prevCDS'] = 'undefined'\n response_object['prev_right'] = 0\n if next_cds is not None:\n response_object['next_left'] = next_cds.left\n else:\n response_object['next_left'] = cds.right\n response_object['cds'] = {'id': cds.id,\n 'left': cds.left,\n 'right': cds.right,\n 'strand': cds.strand,\n 'function': cds.function,\n 'status': cds.status,\n 'frame': cds.frame,\n 'notes': cds.notes}\n \n left_positions, right_positions = get_blasts(phage_id, cds.left)\n\n genemark_gdata_file = helper.get_file_path(\"gdata\", UPLOAD_FOLDER)\n gdata_df = pd.read_csv(genemark_gdata_file, sep='\\t', skiprows=16)\n gdata_df.columns = ['Base', '1', '2', '3', '4', '5', '6']\n try:\n gdata_df = gdata_df[gdata_df.Base.isin(\n range(min(left_positions) - 100, max(right_positions) + 100))]\n except:\n response_object['message'] = \"Not finished parsing\"\n return response_object\n response_object['message'] = \"Finished\"\n response_object['x_data'] = gdata_df[\"Base\"].to_list()\n response_object['y_data_1'] = gdata_df[\"1\"].to_list()\n response_object['y_data_2'] = gdata_df[\"2\"].to_list()\n response_object['y_data_3'] = gdata_df[\"3\"].to_list()\n response_object['y_data_4'] = gdata_df[\"4\"].to_list()\n response_object['y_data_5'] = gdata_df[\"5\"].to_list()\n response_object['y_data_6'] = gdata_df[\"6\"].to_list()\n\n reached_CDS = False\n response_object['nextCDS'] = 'undefined'\n for cds in db.session.query(Annotations).filter_by(phage_id=phage_id).order_by(Annotations.left):\n if reached_CDS and cds.function != \"@DELETED\" and cds.status != \"tRNA\":\n response_object['nextCDS'] = cds.id\n break\n elif cds.id == cds_id:\n reached_CDS = True\n\n reached_CDS = False\n response_object['prevCDS'] = 'undefined'\n for cds in db.session.query(Annotations).filter_by(phage_id=phage_id).order_by(Annotations.left.desc()):\n if reached_CDS and cds.function != \"@DELETED\" and cds.status != \"tRNA\":\n response_object['prevCDS'] = cds.id\n break\n elif cds.id == cds_id:\n reached_CDS = True\n if Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='Glimmer').first():\n response_object['glimmer'] = Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='Glimmer').first().calls.split(',')\n if Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='GeneMark').first():\n response_object['genemark'] = Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='GeneMark').first().calls.split(',')\n if 
Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='Phanotate').first():\n response_object['phanotate'] = Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='Phanotate').first().calls.split(',')\n if Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='Prodigal').first():\n response_object['prodigal'] = Gene_Calls.query.filter_by(phage_id=phage_id).filter_by(id='Prodigal').first().calls.split(',')\n\n return response_object\n\n# ---------- BLAST HELPER FUNCTIONS ----------\ndef get_blasts(phage_id, left):\n \"\"\"Queries and returns all of the data for a CDS given the current left position.\n\n Gets alternate lefts and rights within range defined in settings\n Gets the blast results for all alternate CDS.\n\n Args:\n left:\n The left position of the current CDS.\n Returns:\n Lists of the alternate lefts and rights.\n \n \"\"\"\n dir_blasts = {}\n comp_blasts = {}\n all_blasts = {}\n dir_lefts = []\n dir_rights = []\n comp_lefts = []\n comp_rights = []\n lefts = []\n rights = []\n setting = db.session.query(Settings).filter_by(phage_id=phage_id).first()\n minimum = left - setting.back_left_range\n maximum = left + setting.forward_left_range\n for blast in db.session.query(Blast_Results).filter_by(phage_id=phage_id).filter_by(strand='+').order_by(Blast_Results.left):\n if blast.left > minimum and blast.left < maximum:\n lefts.append(blast.left)\n rights.append(blast.right)\n dir_lefts.append(blast.left)\n dir_rights.append(blast.right)\n dir_blasts[str(blast.left) + '-' + str(blast.right) + ' ' + blast.strand] = eval(blast.results)\n all_blasts[str(blast.left) + '-' + str(blast.right) + ' ' + blast.strand] = eval(blast.results)\n for blast in db.session.query(Blast_Results).filter_by(phage_id=phage_id).filter_by(strand='-').order_by(Blast_Results.left):\n if blast.left > minimum and blast.left < maximum:\n lefts.append(blast.left)\n rights.append(blast.right)\n comp_lefts.append(blast.left)\n comp_rights.append(blast.right)\n comp_blasts[str(blast.left) + '-' + str(blast.right) + ' ' + blast.strand] = eval(blast.results)\n all_blasts[str(blast.left) + '-' + str(blast.right) + ' ' + blast.strand] = eval(blast.results)\n response_object['comp_left_options'] = comp_lefts\n response_object['comp_right_options'] = comp_rights\n response_object['dir_left_options'] = dir_lefts\n response_object['dir_right_options'] = dir_rights\n response_object['dir_blast'] = dir_blasts\n response_object['comp_blast'] = comp_blasts\n response_object['all_blast'] = all_blasts\n return lefts, rights","sub_path":"back-end/annotations_cds.py","file_name":"annotations_cds.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"40568263","text":"class MyHashMap:\r\n\r\n def __init__(self):\r\n self.D = dict()\r\n\r\n def put(self, key: int, value: int) -> None:\r\n \"\"\"\r\n value will always be non-negative.\r\n \"\"\"\r\n self.D[key] = value\r\n return None\r\n\r\n def get(self, key: int) -> int:\r\n \"\"\"\r\n Returns the value to which the specified key is mapped, or -1 if this map contains no mapping for the key\r\n \"\"\"\r\n if key in self.D.keys():\r\n return self.D[key]\r\n return -1\r\n\r\n def remove(self, key: int) -> None:\r\n \"\"\"\r\n Removes the mapping of the specified value key if this map contains a mapping for the key\r\n \"\"\"\r\n if key in self.D.keys():\r\n self.D.pop(key)\r\n\r\n\r\n# Your MyHashMap object will be instantiated and called as such:\r\nobj = 
MyHashMap()\r\nobj.put(1, 2)\r\nobj.put(2, 22)\r\nobj.put(3, 52)\r\nobj.put(4, 26)\r\nprint(obj.D)\r\nobj.put(1, 34445465)\r\nparam_2 = obj.get(2)\r\nprint(param_2)\r\nobj.remove(2)\r\nprint(obj.D)\r\n","sub_path":"HashMap.py","file_name":"HashMap.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"150828466","text":"# execfile('name_finder.py'); \r\n\r\n\r\nclass name_finder: \r\n\tdef __init__(self): \r\n\t\tself.NameMap2Short = defaultdict(list); \r\n\t\tself.NameMap2Long = defaultdict(list); \r\n\t\tself.NameMap2Rel = defaultdict(list); \r\n\t\tself.FilteredNames=[]; \r\n\r\n\tdef synsets(self, names): \r\n\t\t\"\"\"\r\n\t\t\texample: synsets(['jewelry.n.01', 'garment.n.01', ])\r\n\t\t\tnote of tool functions: \r\n\t\t\t\thypernym_leaves(['shoe', 'shirt', 'pant']) -- to find common hypernyms\r\n\t\t\t\tword_hypernyms('flower', 'n', False) -- to show hypernyms\r\n\t\t\t\twords_hyponyms([wordnet.synset('jewelry.n.01')]) -- to show hyponyms\r\n\t\t\t\trepr([s.name for s in synsets]) -- to show names\r\n\t\t\t\twordnet.synset('jewelry.n.01') -- to convert to synset\r\n\t\t\"\"\"\r\n\t\treturn set([wordnet.synset(n) for n in names]); \r\n\r\n\tdef check_synset(self, word, synset_collection): \r\n\t\ts=[s.name for s in name_check_hypernym_leaves(word, synset_collection)]\r\n\t\t#s=set([s for s,k in word_hypernyms(word,'n',False)]) & synset_collection)\r\n\t\treturn s;\r\n\r\n\tdef is_pos_in_synset(self, pos, synset_collection, words_except): \r\n\t\t\"\"\"\r\n\t\t\tcheck if any word in pos is in a synset collection \r\n\t\t\"\"\"\r\n\t\treturn sum([(len(n)>1) and len(name_check_hypernym_leaves(n, synset_collection)) for n,t in pos \r\n\t\t\t]) and not sum([(n.lower() in words_except) for n,t in pos]);\r\n\t\t#return sum([(len(n)>1) and (set([s for s,k in word_hypernyms(n,'n',False)]) & synset_collection) ) for n,t in pos \r\n\t\t#\t]) and not sum([n.lower() in words_except for n,t in pos]);\r\n\r\n\tdef find_attr_names(self, post_data, conf, category_name, to_print=1, to_recompute=False, use_cache=True): \r\n\t\t\"\"\"\r\n\t\t\tconf: [{'sbj': {'in':set(synsets), 'except':except}, \r\n\t\t\t\t\t'obj': {'in':list(words), 'except':except}, \r\n\t\t\t\t\t'name': ['adj','obj'], \r\n\t\t\t\t\t'short_name': ['obj'], \r\n\t\t\t\t\t },\r\n\t\t\t\t\t] \r\n\t\t\tfind names: 1) with any adj modifier; \r\n\t\t\t\t\t\t2) in synset_collection, but not in words_except\r\n\t\t\tresults appended to self.FilteredNames\r\n\t\t\"\"\"\r\n\t\tif not self.FilteredNames: \r\n\t\t\tself.FilteredNames=[[] for i in range(len(post_data))]; \r\n\t\t#\r\n\t\tfor id, post in enumerate(post_data): \r\n\t\t\tif to_print>=1: print('find_attr_names: %d/%d'%(id, len(post_data) )); \r\n\t\t\trs=[];\r\n\t\t\tif use_cache:\r\n\t\t\t\ttry:\r\n\t\t\t\t\trs,s=solr_query_var({'category':category_name, 'post_id_s':post['id'] }, page=-1 );\r\n\t\t\t\texcept: \r\n\t\t\t\t\tpass; \r\n\t\t\t#\r\n\t\t\tname_pairs=[]; \r\n\t\t\tif rs and not to_recompute: \r\n\t\t\t\tif 'name_pairs' in rs[0].keys(): \r\n\t\t\t\t\tname_pairs=decode_post_data(rs[0]['name_pairs_t']);\r\n\t\t\telse:\r\n\t\t\t\tcached_data1=decode_post_data(post['data_t']); \r\n\t\t\t\t#\r\n\t\t\t\tfor i, rel in enumerate(cached_data1['relations']):\r\n\t\t\t\t\tif to_print>=2: print(i, T(rel['sbj']), T(rel['verb']), T(rel['obj']),T(rel['adj']) );\r\n\t\t\t\t\tfor conf1 in conf: \r\n\t\t\t\t\t\tis_good=True;\r\n\t\t\t\t\t\tfor o in conf1.keys(): \r\n\t\t\t\t\t\t\tif o in ['name', 
'short_name']: #name fields\r\n\t\t\t\t\t\t\t\tcontinue; \r\n\t\t\t\t\t\t\telif o not in rel.keys(): \r\n\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\tis_good=False; \r\n\t\t\t\t\t\t\t\tbreak; \r\n\t\t\t\t\t\t\telif conf1[o]==None: #should be None\r\n\t\t\t\t\t\t\t\tif o in rel.keys() and rel[o]: \r\n\t\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\t\tis_good=False; \r\n\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\telif isinstance(conf1[o], basestring): #attribute should equal to value\r\n\t\t\t\t\t\t\t\tif conf1[o]!=T(rel[o]).lower(): \r\n\t\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\t\tis_good=False; \r\n\t\t\t\t\t\t\t\t\tbreak; \r\n\t\t\t\t\t\t\telif isinstance(conf1[o], dict) and 'in' in conf1[o].keys():\r\n\t\t\t\t\t\t\t\t#it uses 'in_set' and 'except_set' to store set including plural forms\r\n\t\t\t\t\t\t\t\tif 'except_set' not in conf1[o].keys():\r\n\t\t\t\t\t\t\t\t\tconf1[o]['except_set']=set([w.lower() for w in conf1[o]['except']]+[w.lower()+'s' for w in conf1[o]['except']]);\r\n\t\t\t\t\t\t\t\tif conf1[o]['in'] and isinstance(conf1[o]['in'], set): #set of synsets\r\n\t\t\t\t\t\t\t\t\tif not self.is_pos_in_synset(rel[o], conf1[o]['in'], conf1[o]['except_set']):\r\n\t\t\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\t\t\tis_good=False; \r\n\t\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\telif conf1[o]['in'] and isinstance(conf1[o]['in'], list): #list of strings\r\n\t\t\t\t\t\t\t\t\tif 'in_set' not in conf1[o].keys():\r\n\t\t\t\t\t\t\t\t\t\tconf1[o]['in_set']=set([w.lower() for w in conf1[o]['in']]+[w.lower()+'s' for w in conf1[o]['in']]);\r\n\t\t\t\t\t\t\t\t\tif T(rel[o]).lower() not in conf1[o]['in_set'] or T(rel[o]).lower() in conf1[o]['except_set']:\r\n\t\t\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\t\t\tis_good=False;\r\n\t\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\telif not conf1[o]['in']: #need any value\r\n\t\t\t\t\t\t\t\t\tif o not in rel.keys() or not rel[o]: \r\n\t\t\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\t\t\tis_good=False;\r\n\t\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\t\telse: \r\n\t\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\t\tis_good=False;\r\n\t\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t\telse: \r\n\t\t\t\t\t\t\t\tif to_print>=3: print(i, '-- not ', o );\r\n\t\t\t\t\t\t\t\tis_good=False; \r\n\t\t\t\t\t\t\t\tbreak;\r\n\t\t\t\t\t\t#\r\n\t\t\t\t\t\tif is_good: \r\n\t\t\t\t\t\t\tif to_print>=2: print(i, T(rel['sbj']), T(rel['obj']), T(rel['adj']), T(rel['verb']) );\r\n\t\t\t\t\t\t\tshort_name='';\r\n\t\t\t\t\t\t\tname=T([t for o in conf1['name'] for t in rel[o]]);\r\n\t\t\t\t\t\t\tif 'short_name' in conf1.keys(): short_name=T([t for o in conf1['short_name'] for t in rel[o]]);\r\n\t\t\t\t\t\t\tname_pairs.append((name, short_name));\r\n\t\t\t\t\t\t\tself.NameMap2Rel[(name, short_name)].append({'r': rel, 'conf': conf1});\r\n\t\t\t\t\t\t#\r\n\t\t\t\tif use_cache:\r\n\t\t\t\t\trecord={'category':category_name, \r\n\t\t\t\t\t\t'post_id_s':cached_data1['id'], \r\n\t\t\t\t\t\t'date_dt': cached_data1['date_dt'],\r\n\t\t\t\t\t\t'updated_dt': datetime.now(),\r\n\t\t\t\t\t\t'name_pairs_t': encode_post_data(name_pairs),\r\n\t\t\t\t\t\t}; \r\n\t\t\t\t\tif rs: \r\n\t\t\t\t\t\trecord['id']=rs[0]['id']; \r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\trecord['id']=solr_new_id('%s--%s'%(category_name, cached_data1['id']));\r\n\t\t\t\t\ts=solr_update_var(record); 
\r\n\t\t\t\t\ts=solr_commit();\r\n\t\t\tfor name, short_name in name_pairs:\r\n\t\t\t\tif name and name not in self.FilteredNames[id]: \r\n\t\t\t\t\tif to_print>=2: print(name, short_name);\r\n\t\t\t\t\tself.FilteredNames[id].append(name); \r\n\t\t\t\t\tif short_name: \r\n\t\t\t\t\t\tif short_name not in self.NameMap2Short[name.lower()]: self.NameMap2Short[name.lower()].append(short_name);\r\n\t\t\t\t\t\tif name not in self.NameMap2Long[short_name.lower()]: self.NameMap2Long[short_name.lower()].append(name);\r\n\t\t\t\t#\r\n\tdef find_attr_names1(self, post_data, conf, to_print=True): # the plain old algorithm\r\n\t\tif not self.FilteredNames: \r\n\t\t\tself.FilteredNames=[[] for i in range(len(post_data))]; \r\n\t\t#\r\n\t\tfor id, post in enumerate(post_data): \r\n\t\t\tif to_print: print('find_attr_names: %d/%d'%(id, len(post_data) )); \r\n\t\t\tcached_data1=decode_post_data(post['data_t']); \r\n\t\t\t#\r\n\t\t\tfor rel in cached_data1['relations']:\r\n\t\t\t\tif rel['adj']: \r\n\t\t\t\t\tif rel['obj']:\r\n\t\t\t\t\t\tif self.is_pos_in_synset(rel['obj'], synset_collection, words_except):\r\n\t\t\t\t\t\t\tname=T(rel['adj']+rel['obj']);\r\n\t\t\t\t\t\t\tself.FilteredNames[id].append(name); \r\n\t\t\t\t\t\t\tself.NameMap2Short[name.lower()].append(T(rel['obj']));\r\n\t\t\t\t\t\t\tself.NameMap2Long[T(rel['obj']).lower()].append(name);\r\n\t\t\t\t\t\tif self.is_pos_in_synset(rel['sbj'], synset_collection, words_except):\r\n\t\t\t\t\t\t\tself.FilteredNames[id].append(T(rel['sbj'])); \r\n\t\t\t\t\telse: \r\n\t\t\t\t\t\tif self.is_pos_in_synset(rel['sbj'], synset_collection, words_except):\r\n\t\t\t\t\t\t\tname=T(rel['adj']+rel['sbj']);\r\n\t\t\t\t\t\t\tself.FilteredNames[id].append(name); \r\n\t\t\t\t\t\t\tself.NameMap2Long[T(rel['sbj']).lower()].append(name);\r\n\t\t\t\t\t\t\tself.NameMap2Short[name.lower()].append(T(rel['sbj']));\r\n","sub_path":"cgi-bin/name_finder.py","file_name":"name_finder.py","file_ext":"py","file_size_in_byte":7281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"491590917","text":"from datetime import date\nmaioresIdade = 0\nmenoresIdade = 0\n\nfor c in range(1,8):\n    anoNasc = int(input('Enter the year of birth of person {}: '.format(c)))\n    if date.today().year - anoNasc >= 18:\n        maioresIdade+=1\n    else:\n        menoresIdade+=1\nprint('Number of adults: {} \\nNumber of minors: {}'.format(maioresIdade, menoresIdade))","sub_path":"Exercicios/Estruturas de Controle/Ex054.py","file_name":"Ex054.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"105029302","text":"#!/usr/bin/python\n# file name : eksepsi_tutorial.py\n\nfrom __future__ import print_function\nimport sys\n\ndef main():\n\ttry:\n\t\tfilename = \"contoh.txt\" # file doesn't exist!\n\t\tf = open(filename) # IOError in here\n\t\n\t\t# read the file contoh.txt\n\t\tfor line in f:\n\t\t\tprint(line,end='')\n\t\t\n\t\t# close the file\n\t\tf.close()\n\n\texcept IOError as e:\n\t\tprint(\"File '%s' not found\" % filename)\n\t\tsys.exit()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"eksepsi_tutorial.py","file_name":"eksepsi_tutorial.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"262449834","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 10 15:20:33 2020\r\n@author: santi\r\n\"\"\"\r\n\r\ndef 
save_gdf(data, file_name, geojson=False, shapefile=True):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import zipfile\r\n import os\r\n \r\n geojson_path = file_name + '.geojson'\r\n shape_path = file_name + '.shp'\r\n zip_path = file_name + '.zip'\r\n\r\n # -------------------------------------------------------\r\n # ----------- Save geojson (it's lighter) ---------------\r\n # -------------------------------------------------------\r\n if geojson:\r\n data.to_file(\r\n filename = geojson_path, \r\n driver=\"GeoJSON\"\r\n )\r\n\r\n # -------------------------------------------------------\r\n # ----------------- Save shapefile ----------------------\r\n # -------------------------------------------------------\r\n if shapefile:\r\n data.to_file(\r\n driver = 'ESRI Shapefile',\r\n filename = shape_path,\r\n )\r\n # create the .prj file\r\n prj_name = file_name + '.prj'\r\n prj = open(prj_name, \"w\")\r\n \r\n prj_write = 'GEOGCS[\"GCS_WGS_1984\",DATUM[\"D_WGS_1984\",SPHEROID[\"WGS_1984\",6378137,298.257223563]],PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.017453292519943295]]'\r\n # call the function and supply the epsg code\r\n prj.write(prj_write)\r\n prj.close()\r\n \r\n if shapefile:\r\n extensions = ['.cpg', '.dbf','.prj', '.shp', '.shx']\r\n \r\n zipObj = zipfile.ZipFile(zip_path, 'w')\r\n \r\n for ex in extensions:\r\n zipObj.write(file_name + ex) \r\n os.remove(file_name + ex) # in case I want to remove the files out of the shapefile\r\n \r\n zipObj.close()\r\n \r\n \r\ndef import_gtfs(gtfs_path, busiest_date = True):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import os\r\n import pandas as pd\r\n import zipfile\r\n\r\n try:\r\n import partridge as ptg \r\n except ImportError as e:\r\n os.system('pip install partridge')\r\n import partridge as ptg\r\n\r\n try:\r\n import geopandas as gpd\r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n # Partridge to read the feed\r\n # service_ids = pd.read_csv(gtfs_path + '/trips.txt')['service_id'].unique()\r\n # service_ids = frozenset(tuple(service_ids))\r\n \r\n if busiest_date:\r\n service_ids = ptg.read_busiest_date(gtfs_path)[1]\r\n else:\r\n with zipfile.ZipFile(gtfs_path) as myzip:\r\n myzip.extract(\"trips.txt\")\r\n service_ids = pd.read_csv('trips.txt')['service_id'].unique()\r\n service_ids = frozenset(tuple(service_ids))\r\n os.remove('trips.txt')\r\n \r\n view = {'trips.txt': {'service_id': service_ids}}\r\n \r\n feed = ptg.load_geo_feed(gtfs_path, view)\r\n \r\n routes = feed.routes\r\n trips = feed.trips\r\n stop_times = feed.stop_times\r\n stops = feed.stops\r\n shapes = feed.shapes\r\n \r\n # Get routes info in trips\r\n # The GTFS feed might be missing some of the keys, e.g. 
direction_id or shape_id.\r\n # To allow processing incomplete GTFS data, we must reindex instead:\r\n # https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#deprecate-loc-reindex-listlike\r\n # This will add NaN for any missing columns.\r\n trips = pd.merge(trips, routes, how='left').reindex(columns=['trip_id', 'route_id',\r\n 'service_id', 'direction_id','shape_id'])\r\n \r\n # Get trips, routes and stops info in stop_times\r\n stop_times = pd.merge(stop_times, trips, how='left') \r\n stop_times = pd.merge(stop_times, stops, how='left')\r\n # stop_times needs to be geodataframe if we want to do geometry operations\r\n stop_times = gpd.GeoDataFrame(stop_times, geometry='geometry')\r\n \r\n return routes, stops, stop_times, trips, shapes\r\n\r\ndef cut_gtfs(stop_times, stops, shapes):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import os\r\n import pandas as pd\r\n#--------------------------------------------------------\r\n os.system('apt install libspatialindex-dev')\r\n os.system('pip install rtree')\r\n#----------------------------------------------------------\r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n try:\r\n import utm\r\n except ImportError as e:\r\n os.system('pip install utm')\r\n import utm\r\n\r\n from shapely.ops import nearest_points\r\n from shapely.geometry import Point, LineString, MultiLineString, MultiPoint\r\n from shapely.ops import split\r\n from shapely import geometry, ops\r\n\r\n # Get the right epsg code for later conversations\r\n shapes.crs = {'init':'epsg:4326'}\r\n\r\n lat = shapes.geometry.iloc[0].coords[0][1]\r\n lon = shapes.geometry.iloc[0].coords[0][0]\r\n\r\n zone = utm.from_latlon(lat, lon)\r\n\r\n def code(zone):\r\n #The EPSG code is 32600+zone for positive latitudes and 32700+zone for negatives.\r\n if lat <0:\r\n epsg_code = 32700 + zone[2]\r\n else:\r\n epsg_code = 32600 + zone[2]\r\n return epsg_code\r\n\r\n epsg = code(zone)\r\n\r\n # direction_id is optional, as it is not needed to determine route shapes\r\n # However, if direction_id is NaN, pivot_table will return an empty DataFrame.\r\n # Therefore, use a sensible default if direction id is not known.\r\n # Some gtfs feeds only contain direction_id 0, use that as default\r\n stop_times['direction_id'] = stop_times['direction_id'].fillna(0)\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # --------------------- FIND THE CLOSEST POINT TO EACH LINE --------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------ \r\n\r\n # Data frame with stop sequence for route and direction\r\n sseq = stop_times.drop_duplicates(subset=['stop_id','stop_name', 'stop_sequence', 'shape_id'])[['route_id','direction_id','stop_id','stop_name', 'stop_sequence', 'shape_id']]\r\n\r\n # Data frames with the number of stops for each route and direction and shape_id\r\n route_shapes = sseq.pivot_table('stop_id',\r\n index = ['route_id', 'direction_id', 'shape_id'],\r\n aggfunc='count').reset_index()\r\n route_shapes.columns = ['route_id','direction_id', 'shape_id', 'stops_count']\r\n\r\n # List of shape_ids\r\n shape_id_list = shapes.shape_id.unique()\r\n\r\n # Create a DataFrame with the pair (stop, nearest_point) for each shape_id\r\n def 
find_shape_closest_points(shape_id):\r\n #shape_id = row.shape_id\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n # Look for the shape\r\n shape = shapes.loc[shapes.shape_id == shape_id,'geometry'].values[0]\r\n\r\n\r\n # Look for the stop_ids of this shape\r\n route_stop_ids = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n &(sseq['shape_id'] == shape_id)]\r\n\r\n # Look for the geometry of these stops\r\n # merged = pd.merge(route_stop_ids, stops, how='left')\r\n # route_stop_geom = merged.geometry\r\n route_stop_geom = pd.merge(route_stop_ids, stops, how='left').geometry\r\n\r\n # Look for the nearest points of these stops that are in the shape\r\n points_in_shape = route_stop_geom.apply(lambda x: nearest_points(x, shape))\r\n\r\n d = dict(shape_id=shape_id, points=list(points_in_shape))\r\n\r\n return d\r\n\r\n shape_closest_points = [find_shape_closest_points(s) for s in shape_id_list]\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # --------------------- CREATE LINES THAT CUT THE SHAPE ------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n\r\n shape_trans_lines = pd.DataFrame()\r\n # First we define a function that will help us create the line to intersect the shape\r\n\r\n # ---------------- THIS IS THE VALUE YOU SHOULD CHANGE IF THE CUTTING GEOMETRY AND ---\r\n # ---------------- THE LINE INTERSECT -------------------------------------------------\r\n offset = 0.0001\r\n\r\n def create_line(row):\r\n # Formula to make the line longer\r\n # a = (y1-b)/x1\r\n # b = (y2-x2/x1*y1)/(1-x2/x1)\r\n if row[0] == row[1]:\r\n x1 = row[0].x - offset\r\n y1 = row[0].y - offset\r\n\r\n x2 = row[0].x \r\n y2 = row[0].y\r\n\r\n x3 = row[0].x + offset\r\n y3 = row[0].y + offset\r\n\r\n else: \r\n x1 = row[0].x\r\n y1 = row[0].y\r\n\r\n x2 = row[1].x\r\n y2 = row[1].y\r\n\r\n # If x2==x1 it will give the error \"ZeroDivisionError\"\r\n if float(x2) != float(x1):\r\n b = (y2-x2/x1*y1)/(1-x2/x1)\r\n a = (y1-b)/x1\r\n\r\n if x2 - x1 < 0: # We should create an \"if\" to check if we need to do -1 or +1 depending on x2-x1\r\n x3 = x2 - 3*(x1 - x2)#offset\r\n else:\r\n x3 = x2 + 3*(x2 - x1)#offset\r\n\r\n y3 = a*x3 + b\r\n\r\n else:\r\n x3 = x2\r\n b = 0\r\n a = 0\r\n\r\n if y2-y1 < 0:\r\n #y3 = y2 - offset/5\r\n y3 = y2 - 3*(y1-y2) #offset/10000000\r\n else: \r\n #y3 = y2 + offset/5\r\n y3 = y2 + 3*(y2-y1) #offset/10000000\r\n\r\n trans = LineString([Point(x1,y1), Point(x2,y2), Point(x3, y3)])\r\n return trans\r\n\r\n # For each shape we need to create transversal lines and separete the shape in segments \r\n def find_shape_trans_lines(shape_closest_points):\r\n # Choose the shape\r\n shape_id = shape_closest_points['shape_id']\r\n\r\n # Choose the pair (stop, nearest point to shape) to create the line\r\n scp = shape_closest_points['points']\r\n\r\n lines = [create_line(p) for p in scp]\r\n # scp.apply(create_line)\r\n\r\n d = dict(shape_id=shape_id, trans_lines=lines)\r\n\r\n return 
d\r\n\r\n shape_trans_lines = [find_shape_trans_lines(shape_closest_points[i]) for i in range(0, len(shape_closest_points))]\r\n\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------ CUT THE SHAPES --------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # ------------------------------------------------------------------------------\r\n # Set the tolerance of the cuts\r\n tolerance = 0.0001\r\n\r\n loops_route_id = []\r\n loops_direction_id = []\r\n loops_shape_id = []\r\n\r\n def cut_shapes_(shape_trans_lines, shape_closest_points):\r\n shape_id = shape_trans_lines['shape_id']\r\n route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n # Check if the line is simple (ie, doesn't intersect itself)\r\n line = shapes.loc[shapes.shape_id == shape_id, 'geometry'].values[0]\r\n if line.is_simple:\r\n # Split the shape in different segments\r\n trans_lines = shape_trans_lines['trans_lines']\r\n\r\n df = sseq.loc[(sseq['route_id'] == route_id) \r\n & (sseq['direction_id'] == direction_id)\r\n & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n\r\n #df['segment'] = ''\r\n\r\n d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n if len(trans_lines) == 2:\r\n # In case there is a line with only two stops\r\n d['segment'] = [line]\r\n return d\r\n\r\n else:\r\n # trans_lines_all = MultiLineString(list(trans_lines.values))\r\n # trans_lines_cut = MultiLineString(list(trans_lines.values)[1:-1])\r\n\r\n # # Split the shape in different segments, cut by the linestrings created before\r\n # # The result is a geometry collection with the segments of the route\r\n # result = split(line, trans_lines_cut)\r\n try:\r\n trans_lines_all = MultiLineString(trans_lines)\r\n trans_lines_cut = MultiLineString(trans_lines[1:-1])\r\n\r\n # Split the shape in different segments, cut by the linestrings created before\r\n # The result is a geometry collection with the segments of the route\r\n result = split(line, trans_lines_cut)\r\n except ValueError:\r\n # If the cut points are on the line then try to cut with the points instead of lines\r\n test = shape_closest_points['points']\r\n cut_points = [test[i][1] for i in range(len(test))]\r\n cut_points = MultiPoint(cut_points[1:-1])\r\n result = split(line, cut_points)\r\n\r\n if len(result)==len(trans_lines_all)-1:\r\n d['segment'] = [s for s in result]\r\n\r\n return d\r\n else:\r\n loops_route_id.append(route_id)\r\n loops_direction_id.append(direction_id)\r\n loops_shape_id.append(shape_id) \r\n else:\r\n loops_route_id.append(route_id)\r\n loops_direction_id.append(direction_id)\r\n loops_shape_id.append(shape_id)\r\n\r\n segments = [cut_shapes_(shape_trans_lines[i], shape_closest_points[i]) for i in range(0, len(shape_trans_lines))]\r\n\r\n # Remove None values\r\n segments = [i for i in segments if i] \r\n\r\n loops = pd.DataFrame()\r\n loops['route_id'] = loops_route_id\r\n loops['direction_id'] = loops_direction_id\r\n loops['shape_id'] = 
loops_shape_id\r\n\r\n    # ------------------------------------------------------------------------------\r\n    # ------------------------------------------------------------------------------\r\n    # ------------------------------------------------------------------------------\r\n    # ------------------------- CUT THE SHAPES WITH LOOPS --------------------------\r\n    # ------------------------------------------------------------------------------\r\n    # ------------------------------------------------------------------------------\r\n    # ------------------------------------------------------------------------------\r\n\r\n    # Manage the lines with loops\r\n    shapes_loop = shapes.loc[shapes.shape_id.isin(loops_shape_id)]\r\n\r\n    aux = pd.DataFrame.from_dict(shape_trans_lines)\r\n    trans_loop = aux.loc[aux.shape_id.isin(loops_shape_id)]\r\n\r\n    aux = pd.DataFrame.from_dict(shape_closest_points)\r\n    cut_points_loop = aux.loc[aux.shape_id.isin(loops_shape_id)]\r\n\r\n    # Separate the shapes according to possible exceptions\r\n    trans_loop['n_segments'] = trans_loop['trans_lines'].map(len)\r\n    run_shapes_no_middle = False\r\n    run_shapes_one_seg = False\r\n\r\n    # Exception 1: Only three stops --> one cut point, two segments\r\n    # If there's only one cut_point this will make the\r\n    # script skip the \"Middle segments\" part\r\n    # (with only one cut point there are only two segments)\r\n\r\n    shapes_no_middle = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] == 3, 'shape_id'].unique())].reset_index()\r\n\r\n    if len(shapes_no_middle) > 0:\r\n        run_shapes_no_middle = True\r\n\r\n    # Exception 2: Only two stops --> no cut points, one segment\r\n    shapes_one_seg = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] == 2, 'shape_id'].unique())].reset_index()\r\n\r\n    if len(shapes_one_seg) > 0:\r\n        run_shapes_one_seg = True\r\n\r\n    # The rest of the shapes\r\n    shapes_ok = shapes.loc[shapes.shape_id.isin(trans_loop.loc[trans_loop['n_segments'] > 3, 'shape_id'].unique())].reset_index()\r\n\r\n    def add_points(row, add_p, cut_points_gdf):\r\n        # Calculate the min distance between the stops that intersect this segment\r\n        index_track_ = row.name\r\n        p = cut_points_gdf.loc[cut_points_gdf.index.isin(add_p.loc[add_p.index_track_==index_track_, 'index_cut'])]\r\n        p.crs={'init':'epsg:4326'}\r\n\r\n        seg = [LineString([p.geometry.values[i], p.geometry.values[i+1]]) for i in range(0,len(p)-1)]\r\n        seg = gpd.GeoSeries(seg)\r\n        seg.crs={'init':'epsg:4326'}\r\n        dist = seg.to_crs(epsg).length.min() - 5\r\n\r\n\r\n        gse = gpd.GeoSeries(row.geometry, index=[row.distance_m])\r\n        gse.crs = {'init':'epsg:4326'}\r\n        gse = gse.to_crs(epsg)\r\n\r\n        length = gse.index[0]\r\n        start = gse.values[0].coords[0]\r\n        end = gse.values[0].coords[-1]\r\n\r\n        num_vert = int(length/dist)\r\n\r\n        new_points = [start] + [gse.values[0].interpolate(dist*n) for n in list(range(1, num_vert+1))] + [end]\r\n        new_points = [Point(p) for p in new_points]\r\n        new_line = LineString(new_points)\r\n\r\n        check = gpd.GeoSeries([new_line])\r\n        check.crs = {'init':'epsg:{}'.format(epsg)}\r\n        check = check.to_crs(epsg=4326)\r\n        return check[0]\r\n\r\n    # Loop lines with more than three stops\r\n    def cut_loops_shapes_ok(shape_id):\r\n        # Set the ids\r\n        route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n        direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n        df = sseq.loc[(sseq['route_id'] == route_id) \r\n                      & (sseq['direction_id'] == direction_id)\r\n                      & 
(sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n        d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n        #d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id), stop_sequence=list(df.stop_sequence))\r\n\r\n        # All the necessary information to split the line\r\n        # 1- line to be cut\r\n        # 2- transversal lines to cut\r\n        # 3- closest point on the line\r\n\r\n        line = shapes_ok.loc[shapes_ok.shape_id == shape_id, 'geometry'].values[0] \r\n        cut_lines = trans_loop.loc[trans_loop.shape_id==shape_id,'trans_lines'].values[0][1:-1] \r\n        cut_points = [x[1] for x in cut_points_loop.loc[cut_points_loop.shape_id==shape_id,'points'].values[0][1:-1]]\r\n\r\n        cut_gdf = gpd.GeoDataFrame(data=list(range(len(cut_lines))), geometry=cut_lines)\r\n        cut_points_gdf = gpd.GeoDataFrame(data=list(range(len(cut_points))), geometry=cut_points)\r\n\r\n        # ------------------------------------------------------------------------------------------------------------\r\n        # ------------------------------------------------------------------------------------------------------------\r\n        # ------------------------------------------------------------------------------------------------------------\r\n        # Make sure the shape has enough vertices: densify it wherever two cut lines would otherwise intersect the same two-point segment\r\n        # Create a GeoDataFrame with two point segments of the shape and its distance in meters\r\n        shape = line.coords\r\n        # Create two point segments for the shape\r\n        track_l = gpd.GeoSeries([LineString([shape[i], shape[i+1]]) for i in range(0, len(shape)-1)])\r\n        track_l.crs={'init':'epsg:4326'}\r\n        #Calculate the length of each two point segment in meters\r\n        track_dist = track_l.to_crs(epsg=epsg).length\r\n        # Create the dataframe\r\n        track_l_gdf = gpd.GeoDataFrame(data=dict(distance_m = track_dist), geometry = track_l)\r\n\r\n        # Check where stops are closer than points of the track\r\n        # To do that we intersect each two-point segment of the track with our cut lines\r\n        how_many = gpd.sjoin(track_l_gdf, cut_gdf, how='left', op='intersects', lsuffix='left', rsuffix='right').reset_index()\r\n        how_many.rename(columns=dict(index='index_track_', index_right = 'index_cut'), inplace=True)\r\n\r\n        # Then filter those that were intersected by more than one cut line\r\n        how_manyp = how_many.pivot_table('geometry', index='index_track_', aggfunc='count').reset_index()\r\n        how_manyp = how_manyp.loc[how_manyp.geometry>1]\r\n\r\n        add_p = how_many.loc[how_many.index_track_.isin(how_manyp.index_track_.unique())]\r\n\r\n        # Add intermediate points to the segments that were intersected by more than one cut line\r\n        track_l_gdf.loc[track_l_gdf.index.isin(how_manyp.index_track_.unique()), 'geometry'] = track_l_gdf.loc[track_l_gdf.index.isin(how_manyp.index_track_.unique())].apply(lambda x: add_points(x, add_p, cut_points_gdf), axis=1)\r\n\r\n        #track_l_gdf.loc[track_l_gdf.distance_m>dist, 'geometry'] = track_l_gdf.loc[track_l_gdf.distance_m>dist].apply(lambda x: add_points(x, dist), axis=1)\r\n\r\n        # Take the points and create the LineString again\r\n        t = [list(g.coords)[:-1] for g in track_l_gdf.geometry]\r\n        flat_list = [item for sublist in t for item in sublist] + [track_l_gdf.geometry.tail(1).values[0].coords[-1]]\r\n\r\n        line = LineString(flat_list) \r\n\r\n        
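# NOTE (added): the add_points call above densifies the shape wherever several cut lines hit the same two-point segment, so each cut line can later be matched to its own stretch of the line; distances in add_points are measured in the projected CRS 'epsg' computed earlier in the enclosing function.\r\n\r\n        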
# ------------------------------------------------------------------------------------------------------------\r\n        # ------------------------------------------------------------------------------------------------------------\r\n        # ------------------------------------------------------------------------------------------------------------\r\n        # First segment\r\n        # We will use i to identify where the next segment should start\r\n        for i in range(2, len(line.coords)):\r\n            segment = LineString(line.coords[0:i])\r\n            if segment.intersects(cut_lines[0]):\r\n                points_to_stop = line.coords[0:i-1] + list(cut_points[0].coords)\r\n                segment = LineString(points_to_stop)\r\n\r\n                # Save the position of the point that makes it to the intersection\r\n                #last_point = i\r\n                last_point = i-1\r\n                d['segment'] = [segment]\r\n                #df.loc[0, 'segment'] = segment # assign the linestring to that segment\r\n\r\n                break\r\n\r\n        # Middle segments\r\n        for l in range(1, len(cut_lines)):\r\n            nearest_point = list(cut_points[l-1].coords) # segments always start in one of the cut points\r\n            start_iterator = last_point + 1 # start from the last point found in the previous segment\r\n\r\n            for i in range(start_iterator, len(line.coords)+1):\r\n                points_to_stop = nearest_point + line.coords[last_point:i] # keep adding points to extend the line\r\n                segment = LineString(points_to_stop)\r\n\r\n                if segment.intersects(cut_lines[l]): \r\n                    # if the line intersects with the cut line, define the segment\r\n                    # the segment goes from one cut point to the next one\r\n                    points_to_stop = nearest_point + line.coords[last_point:i-1] + list(cut_points[l].coords)\r\n                    segment = LineString(points_to_stop)\r\n\r\n                    # Save the position of the point that makes it to the intersection\r\n                    last_point = i-1\r\n                    d['segment'] = d['segment'] + [segment]\r\n                    break \r\n\r\n                if i==(len(line.coords)):\r\n                    points_to_stop = nearest_point + list(cut_points[l].coords)\r\n                    segment = LineString(points_to_stop)\r\n                    d['segment'] = d['segment'] + [segment]\r\n\r\n        # Last segment\r\n        # We start at the last cut point and go all the way to the end\r\n        nearest_point = list(cut_points[l].coords)\r\n        points_to_stop = nearest_point + line.coords[last_point:len(line.coords)]\r\n        segment = LineString(points_to_stop)\r\n\r\n        d['segment'] = d['segment'] + [segment] \r\n\r\n        return d\r\n\r\n    segments1 = [cut_loops_shapes_ok(s) for s in shapes_ok.shape_id.unique()]\r\n    # Remove None values\r\n    segments1 = [i for i in segments1 if i] \r\n    segments.extend(segments1)\r\n\r\n    # Exception 1: Only three stops --> one cut point, two segments\r\n    # If there's only one cut_point this will make the\r\n    # script skip the \"Middle segments\" part\r\n    # (with only one cut point there are only two segments)\r\n\r\n    if run_shapes_no_middle:\r\n        #for index, row in shapes_no_middle.iterrows():\r\n        def cut_shapes_no_middle(shape_id):\r\n            # Set the ids\r\n            route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n            direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n            df = sseq.loc[(sseq['route_id'] == route_id) \r\n                          & (sseq['direction_id'] == direction_id)\r\n                          & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n            d = dict(shape_id = shape_id, route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n            # All the necessary information to split the line\r\n            # 1- line to be cut\r\n            # 2- transversal lines to cut\r\n            # 3- closest point on the line\r\n\r\n            line = shapes_no_middle.loc[shapes_no_middle.shape_id == shape_id, 'geometry'].values[0] \r\n            cut_lines = 
trans_loop.loc[trans_loop.shape_id==shape_id,'trans_lines'].values[0][1:-1] \r\n            cut_points = [x[1] for x in cut_points_loop.loc[cut_points_loop.shape_id==shape_id,'points'].values[0][1:-1]]\r\n\r\n            # First segment\r\n            # We will use i to identify where the next segment should start\r\n            for i in range(2, len(line.coords)):\r\n                segment = LineString(line.coords[0:i])\r\n\r\n                if segment.intersects(cut_lines[0]):\r\n                    points_to_stop = line.coords[0:i-1] + list(cut_points[0].coords)\r\n                    segment = LineString(points_to_stop)\r\n\r\n                    # Save the position of the point that makes it to the intersection\r\n                    last_point = i\r\n                    d['segment'] = [segment]\r\n                    #df.loc[0, 'segment'] = segment # assign the linestring to that segment\r\n\r\n                    break\r\n\r\n            # Last segment\r\n            # We start at the last cut point and go all the way to the end\r\n            nearest_point = list(cut_points[0].coords)\r\n            points_to_stop = nearest_point + line.coords[last_point-1:len(line.coords)]\r\n            segment = LineString(points_to_stop)\r\n\r\n            d['segment'] = d['segment'] + [segment]\r\n\r\n            return d\r\n\r\n        # Apply the function\r\n        segments2 = [cut_shapes_no_middle(s) for s in shapes_no_middle.shape_id.unique()]\r\n        # Remove None values\r\n        segments2 = [i for i in segments2 if i] \r\n        segments.extend(segments2)\r\n\r\n    # Exception 2: Only two stops --> no cut points, one segment\r\n    if run_shapes_one_seg:\r\n        #for index, row in shapes_one_seg.iterrows():\r\n        def cut_shapes_one_seg(shape_id):\r\n            # Set the ids\r\n            route_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'route_id'].values[0]\r\n            direction_id = route_shapes.loc[route_shapes.shape_id == shape_id, 'direction_id'].values[0]\r\n\r\n            df = sseq.loc[(sseq['route_id'] == route_id) \r\n                          & (sseq['direction_id'] == direction_id)\r\n                          & (sseq['shape_id'] == shape_id)].reset_index()\r\n\r\n            #df['segment'] = ''\r\n            d = dict(shape_id = shape_id,route_id=route_id, direction_id=direction_id, stop_id = list(df.stop_id)[:-1], stop_sequence=list(df.stop_sequence)[:-1])\r\n\r\n            line = shapes_one_seg.loc[shapes_one_seg.shape_id == shape_id, 'geometry'].values[0] \r\n            d['segment'] = [line]\r\n            return d\r\n\r\n        # Apply function\r\n        segments3 = [cut_shapes_one_seg(s) for s in shapes_one_seg.shape_id.unique()]\r\n        # Remove None values\r\n        segments3 = [i for i in segments3 if i] \r\n        segments.extend(segments3)\r\n\r\n\r\n    def format_shapes(s, last_id):\r\n        df = pd.DataFrame()\r\n        df['stop_sequence'] = s['stop_sequence']\r\n        df['start_stop_id'] = s['stop_id']\r\n        df['end_stop_id'] = s['stop_id'][1:] + [last_id]\r\n        df['shape_id'] = s['shape_id']\r\n        df['route_id'] = s['route_id']\r\n        df['direction_id'] = s['direction_id']\r\n\r\n        df['geometry'] = s['segment']\r\n\r\n        return df\r\n\r\n    df = pd.concat([format_shapes(s, sseq.loc[sseq.shape_id==s['shape_id']].tail(1).stop_id.values[0]) for s in segments])\r\n\r\n    df = pd.merge(df, stops[['stop_id', 'stop_name']], left_on='start_stop_id', right_on='stop_id', how='left').drop('stop_id', axis=1)\r\n    df.rename(columns=dict(stop_name='start_stop_name'), inplace=True)\r\n    df = pd.merge(df, stops[['stop_id', 'stop_name']], left_on='end_stop_id', right_on='stop_id', how='left').drop('stop_id', axis=1)\r\n    df.rename(columns=dict(stop_name='end_stop_name'), inplace=True)\r\n    df['segment_id'] = df.start_stop_id + '-' + df.end_stop_id\r\n\r\n    segments_gdf = gpd.GeoDataFrame(data = df.loc[:,['route_id','direction_id','stop_sequence','start_stop_name', 'end_stop_name', 'start_stop_id', 'end_stop_id','segment_id','shape_id']], geometry = df.geometry)\r\n\r\n    
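# NOTE (added): 'epsg' below is assumed to be the projected CRS code computed earlier in the enclosing function, so 'distance_m' comes out in meters.\r\n    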
segments_gdf.crs = {'init':'epsg:4326'}\r\n    segments_gdf['distance_m'] = segments_gdf.geometry.to_crs(epsg=epsg).length\r\n\r\n    return segments_gdf\r\n    \r\ndef speeds_from_gtfs(routes, stop_times, segments_gdf, cutoffs = [0,6,9,15,19,22,24]):\r\n    import warnings\r\n    warnings.filterwarnings(\"ignore\")\r\n    import pandas as pd\r\n    import math\r\n    import os\r\n    \r\n    try:\r\n        import geopandas as gpd \r\n    except ImportError as e:\r\n        os.system('pip install geopandas')\r\n        import geopandas as gpd\r\n    \r\n    # Get the runtime between stops\r\n    stop_times.sort_values(by = ['trip_id', 'stop_sequence'], ascending = True, inplace=True)\r\n    \r\n    first_try = stop_times.loc[:,['trip_id', 'arrival_time']]\r\n    first_try['trip_id_next'] = first_try['trip_id'].shift(-1)\r\n    first_try['arrival_time_next'] = first_try['arrival_time'].shift(-1)\r\n    \r\n    def runtime(row):\r\n        if row.trip_id == row.trip_id_next:\r\n            runtime = (row.arrival_time_next - row.arrival_time)/3600\r\n        else:\r\n            runtime = 0\r\n        \r\n        return runtime\r\n    \r\n    first_try['runtime_h'] = first_try.apply(runtime, axis=1)\r\n    \r\n    if len(first_try) == len(stop_times):\r\n        stop_times['runtime_h'] = first_try['runtime_h']\r\n    \r\n    # direction_id is optional, as it is not needed to determine speeds\r\n    # However, if direction_id is NaN, pivot_table will return an empty DataFrame.\r\n    # Therefore, use a sensible default if direction id is not known.\r\n    # Some gtfs feeds only contain direction_id 0, use that as default\r\n    stop_times['direction_id'] = stop_times['direction_id'].fillna(0)\r\n\r\n    # Merge stop_times with segments_gdf to get the distance\r\n    segments_gdf['direction_id'] = segments_gdf['direction_id'].map(int)\r\n    segments_gdf['stop_sequence'] = segments_gdf['stop_sequence'].map(int)\r\n    \r\n    speeds = pd.merge(stop_times, segments_gdf[['route_id', 'direction_id', 'start_stop_id', 'stop_sequence', 'segment_id','shape_id', 'distance_m']], \r\n                      left_on = ['route_id', 'direction_id', 'stop_id', 'stop_sequence', 'shape_id'], \r\n                      right_on = ['route_id', 'direction_id', 'start_stop_id', 'stop_sequence', 'shape_id'],\r\n                      how = 'left').drop('start_stop_id', axis=1)\r\n    \r\n    speeds = speeds.loc[~speeds.distance_m.isnull(),\r\n                        ['trip_id', 'route_id', 'direction_id', 'shape_id', 'segment_id',\r\n                         'arrival_time', 'departure_time', 'stop_id','stop_name',\r\n                         'stop_sequence', 'runtime_h', 'distance_m','geometry']\r\n                       ]\r\n    \r\n    # Assign a time window to each row\r\n    if max(cutoffs)<=24: \r\n        speeds_ok = speeds.loc[speeds.departure_time < 24*3600]\r\n        speeds_fix = speeds.loc[speeds.departure_time >= 24*3600]\r\n        speeds_fix['departure_time'] = [d - 24*3600 for d in speeds_fix.departure_time]\r\n        \r\n        speeds = speeds_ok.append(speeds_fix)\r\n        labels = []\r\n        for w in cutoffs:\r\n            if float(w).is_integer():\r\n                l = str(w) + ':00'\r\n            else:\r\n                n = math.modf(w)\r\n                l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n            labels = labels + [l]\r\n    else:\r\n        labels = []\r\n        for w in cutoffs:\r\n            if float(w).is_integer():\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    l = str(w1) + ':00'\r\n                else:\r\n                    l = str(w) + ':00'\r\n                labels = labels + [l]\r\n            else:\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    n = math.modf(w1)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                else:\r\n                    n = math.modf(w)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                labels = labels + [l]\r\n    \r\n    labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n    \r\n    
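# Worked example (added): with the default cutoffs [0,6,9,15,19,22,24] the labels become ['0:00-6:00', '6:00-9:00', '9:00-15:00', '15:00-19:00', '19:00-22:00', '22:00-24:00'].\r\n    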
speeds['departure_time'] = speeds['departure_time']/3600\r\n    \r\n    # Put each trip in the right window\r\n    speeds['window'] = pd.cut(speeds['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n    speeds = speeds.loc[~speeds.window.isnull()]\r\n    speeds['window'] = speeds['window'].astype(str)\r\n    \r\n    # Calculate the speed\r\n    speeds.loc[speeds.runtime_h == 0.0, 'runtime_h'] = speeds.loc[speeds.runtime_h != 0.0, 'runtime_h'].mean()\r\n    speeds['speed'] = round(speeds['distance_m']/1000/speeds['runtime_h'])\r\n    speeds = speeds.loc[~speeds.speed.isnull()]\r\n    \r\n    # Calculate average speed to modify outliers\r\n    avg_speed_route = speeds.pivot_table('speed',\r\n                                         index=['route_id', 'direction_id','window'],\r\n                                         aggfunc='mean').reset_index()\r\n    avg_speed_route.rename(columns={'speed':'avg_speed_route'}, inplace=True)\r\n    # Assign average speed to outliers\r\n    speeds = pd.merge(speeds, avg_speed_route, how='left')\r\n    speeds.loc[speeds.speed>120,'speed'] = speeds.loc[speeds.speed>120,'avg_speed_route']\r\n    \r\n    # Calculate max speed per segment (keyed by start stop and direction) to have a free_flow reference\r\n    max_speed_segment = speeds.pivot_table('speed',\r\n                                           index = ['stop_id', 'direction_id'],\r\n                                           aggfunc='max')\r\n    max_speed_segment.rename(columns={'speed':'max_kmh'}, inplace=True)\r\n    \r\n    \r\n    # Get the average per route, direction, segment and time of day\r\n    speeds_agg = speeds.pivot_table(['speed', 'runtime_h', 'avg_speed_route'],\r\n                                    index=['route_id', 'direction_id', 'segment_id', 'window'],\r\n                                    aggfunc = 'mean'\r\n                                   ).reset_index()\r\n    speeds_agg['route_id'] = speeds_agg['route_id'].map(str)\r\n    speeds_agg['direction_id'] = speeds_agg['direction_id'].map(int)\r\n    \r\n    data = pd.merge(speeds_agg, segments_gdf, \r\n                    left_on=['route_id', 'direction_id', 'segment_id'],\r\n                    right_on = ['route_id', 'direction_id', 'segment_id'],\r\n                    how='left').reset_index().sort_values(by = ['route_id', 'direction_id','window','stop_sequence',], ascending=True)\r\n    \r\n    data.drop(['index'], axis=1, inplace=True)\r\n    \r\n    # Route name\r\n    routes['route_name'] = ''\r\n    if routes.route_short_name.isnull().unique()[0]:\r\n        routes['route_name'] = routes.route_long_name\r\n    elif routes.route_long_name.isnull().unique()[0]: \r\n        routes['route_name'] = routes.route_short_name\r\n    else:\r\n        routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n    data = pd.merge(data, routes[['route_id', 'route_name']], left_on='route_id', right_on='route_id', how='left')\r\n    \r\n    # Get the average per segment and time of day\r\n    # Then add it to the rest of the data\r\n    \r\n    all_lines = speeds.pivot_table(['speed', 'runtime_h', 'avg_speed_route'],\r\n                                   index=['segment_id', 'window'],\r\n                                   aggfunc = 'mean'\r\n                                  ).reset_index()\r\n    \r\n    data_all_lines = pd.merge(\r\n        all_lines, \r\n        segments_gdf.drop_duplicates(subset=['segment_id']), \r\n        left_on=['segment_id'],\r\n        right_on = ['segment_id'],\r\n        how='left').reset_index().sort_values(by = ['direction_id','window','stop_sequence'], ascending=True)\r\n    \r\n    data_all_lines.drop(['index'], axis=1, inplace=True)\r\n    data_all_lines['route_id'] = 'ALL_LINES'\r\n    data_all_lines['route_name'] = 'All lines'\r\n    data_all_lines['direction_id'] = 'NA'\r\n    data_complete = data.append(data_all_lines)\r\n    \r\n    data_complete1 = data_complete.loc[~data_complete.route_name.isnull(), :].reset_index()\r\n    \r\n    \r\n    # Get the columns in the right format\r\n    int_columns = ['speed']\r\n    \r\n    for c in int_columns:\r\n        data_complete1[c] = data_complete1[c].apply(lambda x: round(x,1))\r\n    \r\n    \r\n    data_complete1 = 
data_complete1.loc[:,['route_id', 'route_name','direction_id','segment_id', 'window',\r\n                                          'speed', \r\n                                          'start_stop_id', 'start_stop_name', 'end_stop_id','end_stop_name', \r\n                                          'distance_m','stop_sequence', 'shape_id', 'runtime_h','geometry', ]] \r\n    \r\n    data_complete1.columns = ['route_id', 'route_name','dir_id', 'segment_id','window', \r\n                              'speed',\r\n                              's_st_id', 's_st_name', 'e_st_id','e_st_name',\r\n                              'distance_m', 'stop_seq', 'shape_id','runtime_h', 'geometry']\r\n    \r\n    # Assign max speeds to each segment\r\n    data_complete1 = pd.merge(data_complete1, max_speed_segment,\r\n                              left_on=['s_st_id', 'dir_id'], right_on = ['stop_id', 'direction_id'],\r\n                              how='left')\r\n    \r\n    gdf = gpd.GeoDataFrame(data = data_complete1.drop('geometry', axis=1), geometry=data_complete1.geometry)\r\n    \r\n    gdf.loc[gdf.dir_id==0,'dir_id'] = 'Inbound'\r\n    gdf.loc[gdf.dir_id==1,'dir_id'] = 'Outbound'\r\n    \r\n    gdf.rename(columns={'speed': 'speed_kmh'}, inplace=True)\r\n    gdf['speed_mph'] = gdf['speed_kmh']*0.621371\r\n    gdf['max_mph'] = gdf['max_kmh']*0.621371\r\n    \r\n    gdf = gdf.drop(['shape_id'], axis=1).drop_duplicates()\r\n    \r\n    return gdf\r\n    \r\ndef create_json(gdf, variable, filename,\r\n                variable_label,\r\n                filter_variables = [],\r\n                filter_labels = [],\r\n                colors = [],\r\n                sizes = ['medium', 'medium', 'medium','medium','large','large'],\r\n                breaks = [],\r\n                default_values = [],\r\n                symbol_layer = False,\r\n                categories = ['Healthcare', 'Education', 'Food', 'Financial', 'Entertainment', 'Transportation', 'Others'], \r\n                symbols = ['Hospital', 'School','Default', 'Official', 'Special', 'BusStop', 'Default'], \r\n                ):\r\n    import warnings\r\n    warnings.filterwarnings(\"ignore\")\r\n    \r\n    import os\r\n    import json\r\n    import pandas as pd\r\n\r\n    try:\r\n        import utm\r\n    except ImportError as e:\r\n        os.system('pip install utm')\r\n        import utm\r\n\r\n    try:\r\n        import jenkspy\r\n    except ImportError as e:\r\n        os.system('pip install jenkspy')\r\n        import jenkspy\r\n    if symbol_layer:\r\n        # Categorical (symbol) layer configuration\r\n        # We start with Remix Lightrail colors and then add default colors from Plotly\r\n        # qualitative_palette = [blue, red, green, yellow, purple, aqua, pink, peach, melon]\r\n        if colors == []:\r\n            import plotly.express as px\r\n            colors = ['#0066a1', '#a92023', '#066a40', '#e89b01', '#613fa6', '#024b50', '#a72051', '#a72f00', '#476800'] + px.colors.qualitative.Light24\r\n        fill_color = pd.DataFrame(dict(variable=gdf[variable].unique(), fill_color = colors[0:len(gdf[variable].unique())]))\r\n        gdf = pd.merge(gdf, fill_color, left_on=variable, right_on='variable', how='left')\r\n\r\n        d = dict(\r\n            category = categories,\r\n            symbol = symbols\r\n        )\r\n\r\n        category_symbols = pd.DataFrame(d)\r\n\r\n        gdf = pd.merge(gdf, category_symbols, how='left')\r\n\r\n        var_symbol_color = gdf.pivot_table('id', index=[variable, 'symbol', 'fill_color'], aggfunc='count').reset_index()\r\n        var_symbol_color['symbol_color'] = var_symbol_color.apply(lambda x: '{}{}'.format(x.symbol, x.fill_color), axis=1)\r\n\r\n        symbols = []\r\n\r\n        for v in gdf.variable.unique():\r\n            aux = dict(\r\n                input = v,\r\n                value = var_symbol_color.loc[var_symbol_color[variable]==v,'symbol_color'].values[0]\r\n            )\r\n            symbols = symbols + [aux]\r\n\r\n        icon = dict(\r\n            type = 'categorical',\r\n            values = symbols, # list of dicts with values\r\n            dataCol = variable, # could be amenity, group or category for example\r\n            defaultValue = \"Default#000\"\r\n        )\r\n\r\n        label = dict(\r\n            type = 'data-column',\r\n            dataCol = 'name'\r\n        )\r\n\r\n        
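# Example (added): each entry of icon['values'] pairs a category value with a '<symbol><fill_color>' key, e.g. {'input': 'Hospital', 'value': 'Hospital#0066a1'} (assuming 'Hospital' got the first qualitative color above).\r\n        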
t = dict(\r\n            type = 'symbol',\r\n            icon = icon,\r\n            label = label,\r\n            configVersion = 1\r\n        )\r\n    else:\r\n        # Numerical (line and circle) layer configuration\r\n        if colors == []:\r\n            colors = [\"#D83D25\",\"#EF6933\",\"#F89041\",\"#fee090\",\"#91bfdb\",\"#4575b4\"]\r\n\r\n        gdf[variable] = gdf[variable].map(int)\r\n        \r\n        if 'window' in list(gdf.columns):\r\n            sort_windows = pd.DataFrame()\r\n            sort_windows['window'] = gdf.window.unique()\r\n            sort_windows['sort'] = [i.split(':')[0] for i in gdf.window.unique()]\r\n            sort_windows['sort'] = sort_windows['sort'].astype(int)\r\n            sort_windows.sort_values(by='sort', ascending=True, inplace=True)\r\n            sort_windows.reset_index(inplace=True)\r\n        \r\n        # Calculate breaks for the variable\r\n        if breaks == []:\r\n            breaks = jenkspy.jenks_breaks(gdf[variable], nb_class=len(colors))\r\n            breaks = [int(b) for b in breaks]\r\n        max_value = int(gdf[variable].max())\r\n        bl = [int(b) for b in breaks]\r\n        \r\n        # Colors \r\n        stops_color = []\r\n        for i in range(len(colors)):\r\n            aux = dict(input = bl[i], output = colors[i])\r\n            stops_color = stops_color + [aux]\r\n        \r\n        color = dict(\r\n            type='range',\r\n            stops = stops_color,\r\n            dataCol = variable,\r\n            maxInput = max_value\r\n        )\r\n        \r\n        # Sizes\r\n        stops_size = []\r\n        for i in range(len(colors)):\r\n            aux = dict(input = bl[i], output = sizes[i])\r\n            stops_size = stops_size + [aux]\r\n        \r\n        if gdf.geom_type[0] == 'Point':\r\n            radius = dict(\r\n                type='range',\r\n                stops = stops_size,\r\n                dataCol = variable,\r\n                maxInput = max_value\r\n            )\r\n            gtype = 'circle'\r\n        elif gdf.geom_type[0] == 'LineString':\r\n            width = dict(\r\n                type='range',\r\n                stops = stops_size,\r\n                dataCol = variable,\r\n                maxInput = max_value\r\n            )\r\n            gtype = 'line'\r\n        else:\r\n            print(\"Check the geometry, it is not recognized as a LineString or a Point\")\r\n        \r\n        # Legend labels\r\n        filter_variables1 = [variable] + filter_variables\r\n        filter_labels1 = [variable_label] + filter_labels\r\n        \r\n        legendLabels = dict(\r\n            dataColLabels = {filter_variables1[i]: filter_labels1[i] for i in range(len(filter_variables1))}\r\n        )\r\n        \r\n        # Filterable columns\r\n        filterableColumns = []\r\n        for f in filter_variables:\r\n            if (f == 'route_name') & ('All lines' in list(gdf[f].unique())):\r\n                aux = dict(\r\n                    values = ['All lines'] + list(gdf.loc[gdf.route_id!='ALL_LINES'].route_name.sort_values(ascending=True).unique()),\r\n                    dataCol = 'route_name',\r\n                    defaultValue = 'All lines'\r\n                )\r\n            elif (f != 'window')&(f != 'day_type'):\r\n                if default_values[filter_variables.index(f)] == True:\r\n                    aux = dict(\r\n                        values = [str(x) for x in gdf[f].sort_values(ascending=True).unique()],\r\n                        dataCol = f,\r\n                        defaultValue = str(list(gdf[f].sort_values(ascending=True).unique())[0])\r\n                    )\r\n                else:\r\n                    aux = dict(\r\n                        values = [str(x) for x in gdf[f].sort_values(ascending=True).unique()],\r\n                        dataCol = f\r\n                    )\r\n            elif f == 'window':\r\n                if len(sort_windows.window.unique()) > 1:\r\n                    default_val = list(sort_windows.window.unique())[1]\r\n                else:\r\n                    default_val = list(sort_windows.window.unique())[0]\r\n                aux = dict(\r\n                    values = list(sort_windows.window.unique()),\r\n                    dataCol = 'window',\r\n                    defaultValue = default_val\r\n                )\r\n            elif f == 'day_type':\r\n                aux = dict(\r\n                    values = ['Weekday', 'Saturday', 'Sunday'],\r\n                    dataCol = 'day_type',\r\n                    defaultValue = 'Weekday'\r\n                )\r\n            filterableColumns = filterableColumns + [aux]\r\n        \r\n        
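# Sketch (added): the config dumped below ends up shaped like {'type': 'line'|'circle', 'color': {...}, 'width'|'radius': {...}, 'legendLabels': {...}, 'configVersion': 1, 'filterableColumns': [...]}.\r\n        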
# Save the json file\r\n        if gtype == 'circle':\r\n            t = dict(\r\n                type=gtype,\r\n                color=color,\r\n                radius=radius,\r\n                legendLabels=legendLabels,\r\n                configVersion=1,\r\n                filterableColumns=filterableColumns\r\n            )\r\n        elif gtype == 'line':\r\n            t = dict(\r\n                type=gtype,\r\n                color=color,\r\n                width=width,\r\n                legendLabels=legendLabels,\r\n                configVersion=1,\r\n                filterableColumns=filterableColumns\r\n            )\r\n    json_name = 'json_' + filename + '.json'\r\n    with open(json_name, 'w') as outfile:\r\n        json.dump(t, outfile)\r\n\r\ndef stops_freq(stop_times, stops, cutoffs = [0,6,9,15,19,22,24]):\r\n    import warnings\r\n    warnings.filterwarnings(\"ignore\")\r\n    import math\r\n    import pandas as pd\r\n    import os\r\n    import re\r\n    \r\n    try:\r\n        import geopandas as gpd \r\n    except ImportError as e:\r\n        os.system('pip install geopandas')\r\n        import geopandas as gpd\r\n    \r\n    hours = list(range(25))\r\n    hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n    \r\n    if max(cutoffs)<=24: \r\n        stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n        stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n        stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n        \r\n        stop_times = stop_times_ok.append(stop_times_fix)\r\n        labels = []\r\n        for w in cutoffs:\r\n            if float(w).is_integer():\r\n                l = str(w) + ':00'\r\n            else:\r\n                n = math.modf(w)\r\n                l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n            labels = labels + [l]\r\n    else:\r\n        labels = []\r\n        for w in cutoffs:\r\n            if float(w).is_integer():\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    l = str(w1) + ':00'\r\n                else:\r\n                    l = str(w) + ':00'\r\n                labels = labels + [l]\r\n            else:\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    n = math.modf(w1)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                else:\r\n                    n = math.modf(w)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                labels = labels + [l]\r\n    \r\n    labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n    \r\n    stop_times['departure_time'] = stop_times['departure_time']/3600\r\n    \r\n    # Put each trip in the right window\r\n    stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n    stop_times = stop_times.loc[~stop_times.window.isnull()]\r\n    stop_times['window'] = stop_times['window'].astype(str)\r\n    stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n    stop_times['hour'] = stop_times['hour'].astype(str)\r\n\r\n    # direction_id is optional, as it is not needed to determine trip frequencies\r\n    # However, if direction_id is NaN, pivot_table will return an empty DataFrame.\r\n    # Therefore, use a sensible default if direction id is not known.\r\n    # Some gtfs feeds only contain direction_id 0, use that as default\r\n    stop_times['direction_id'] = stop_times['direction_id'].fillna(0)\r\n    trips_per_window = stop_times.pivot_table('trip_id', index=['stop_id', 'direction_id','window'], aggfunc='count').reset_index()\r\n    trips_per_hour = stop_times.pivot_table('trip_id', index=['stop_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n    \r\n    trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n    trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n    \r\n    max_trips = trips_per_hour.pivot_table('max_trips', index=['stop_id', 'direction_id'], aggfunc='max').reset_index()\r\n    max_freq = trips_per_hour.pivot_table('max_frequency', index=['stop_id', 'direction_id'], aggfunc='min').reset_index()\r\n    \r\n    trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n    start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0]))\r\n    end_time = 
trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)))\r\n \r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n stop_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n stop_frequencies = pd.merge(stop_frequencies, max_freq, how = 'left')\r\n stop_frequencies = pd.merge(stop_frequencies, stops.loc[:, ['stop_id', 'stop_name', 'geometry']], how='left')\r\n stop_frequencies = gpd.GeoDataFrame(data=stop_frequencies.drop('geometry', axis=1), geometry=stop_frequencies.geometry)\r\n \r\n # This is a bit suspect, since some gtfs feeds seem to only use direction_id 0\r\n stop_frequencies.loc[stop_frequencies.direction_id == 0, 'direction_id'] = 'Inbound'\r\n stop_frequencies.loc[stop_frequencies.direction_id == 1, 'direction_id'] = 'Outbound'\r\n \r\n stop_frequencies.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq'\r\n }, inplace=True)\r\n stop_frequencies.sort_values(by='frequency', ascending=False, inplace=True)\r\n \r\n return stop_frequencies\r\n \r\ndef map_gdf(gdf, variable,\r\n colors = [\"#d13870\", \"#e895b3\" ,'#55d992', '#3ab071', '#0e8955','#066a40'],\r\n tooltip_var = [],\r\n tooltip_labels = [],\r\n breaks = []):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import branca\r\n import pandas as pd\r\n import os\r\n import plotly.express as px\r\n try:\r\n import jenkspy\r\n except ImportError as e:\r\n os.system('pip install jenkspy')\r\n import jenkspy\r\n \r\n try:\r\n import folium\r\n except ImportError as e:\r\n os.system('pip install folium')\r\n import folium\r\n\r\n # Look for the center of the map\r\n minx, miny, maxx, maxy = gdf.geometry.total_bounds\r\n \r\n centroid_lat = miny + (maxy - miny)/2\r\n centroid_lon = minx + (maxx - minx)/2 \r\n \r\n if isinstance(gdf[variable].values[0], str):\r\n categorical = True\r\n else: \r\n categorical = False\r\n \r\n # Calculate the breaks if they were not specified\r\n if (breaks == []) & (not categorical):\r\n breaks = jenkspy.jenks_breaks(gdf[variable], nb_class=len(colors))\r\n breaks = [int(b) for b in breaks]\r\n \r\n m = folium.Map(location=[centroid_lat, centroid_lon], \r\n tiles='cartodbpositron', zoom_start=12\r\n )\r\n # If the variable is categorical\r\n if categorical:\r\n gdf['radius'] = 5\r\n # qualitative_palette = [blue, red, green, yellow, purple, aqua, pink, peach,melon]\r\n # We start with Remix Lightrail colors and then add default colors from Plotly\r\n qualitative_palette = ['#0066a1', '#a92023', '#066a40', '#e89b01', '#613fa6', '#024b50', '#a72051', '#a72f00', '#476800']\r\n color_palette = qualitative_palette + px.colors.qualitative.Pastel + px.colors.qualitative.Prism + px.colors.qualitative.Vivid + px.colors.qualitative.Light24\r\n fill_color = pd.DataFrame(dict(variable=gdf[variable].unique(), fill_color = color_palette[0:len(gdf[variable].unique())])) \r\n gdf=pd.merge(gdf, fill_color, left_on=variable, right_on='variable', how='left')\r\n # If the variable is numerical\r\n else:\r\n gdf['radius'] = gdf[variable]\r\n index = [int(b) for b in breaks]\r\n colorscale = branca.colormap.StepColormap(colors, index = index, caption=variable)\r\n gdf['fill_color'] = gdf[variable].apply(lambda x: colorscale(x)) \r\n \r\n if gdf.geom_type.values[0] == 'Point':\r\n # my code for circles\r\n # Create the circles\r\n for i in range(int(len(gdf))):\r\n folium.CircleMarker(\r\n location=[gdf.loc[i, 'geometry'].y, gdf.loc[i, 'geometry'].x], \r\n radius = float(gdf.loc[i, 
'radius']),\r\n                #popup=geo_data.loc[i, 'stop_name'], \r\n                tooltip = tooltip_labels[0] + str(gdf.loc[i, tooltip_var[0]]), \r\n                color='#ffffff00',\r\n                fill = True,\r\n                fill_opacity = .7,\r\n                fill_color = str(gdf.loc[i, 'fill_color'])\r\n            ).add_to(m)\r\n    else:\r\n        # Styling function for LineStrings \r\n        def style_function(feature):\r\n            return {\r\n                'fillOpacity': 0.5,\r\n                'weight': 3,#math.log2(feature['properties']['speed'])*2,\r\n                'color': feature['properties']['fill_color']\r\n            }\r\n        # my code for lines\r\n        geo_data = gdf.__geo_interface__\r\n        folium.GeoJson(\r\n            geo_data, \r\n            style_function = style_function,\r\n            tooltip = folium.features.GeoJsonTooltip(fields=tooltip_var,\r\n                                                     aliases = tooltip_labels,\r\n                                                     labels=True,\r\n                                                     sticky=False)\r\n        ).add_to(m)\r\n    \r\n    return m\r\n\r\ndef lines_freq(stop_times, trips, shapes, routes, cutoffs = [0,6,9,15,19,22,24]):\r\n    import warnings\r\n    warnings.filterwarnings(\"ignore\")\r\n    import math\r\n    import pandas as pd\r\n    import os\r\n    import re\r\n    \r\n    try:\r\n        import geopandas as gpd \r\n    except ImportError as e:\r\n        os.system('pip install geopandas')\r\n        import geopandas as gpd\r\n    \r\n    # Generate the hours of the day\r\n    hours = list(range(25))\r\n    hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n    \r\n    # Generate the time windows and cutoffs\r\n    if max(cutoffs)<=24: \r\n        stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n        stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n        stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n        \r\n        stop_times = stop_times_ok.append(stop_times_fix)\r\n        labels = []\r\n        for w in cutoffs:\r\n            if float(w).is_integer():\r\n                l = str(w) + ':00'\r\n            else:\r\n                n = math.modf(w)\r\n                l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n            labels = labels + [l]\r\n    else:\r\n        labels = []\r\n        for w in cutoffs:\r\n            if float(w).is_integer():\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    l = str(w1) + ':00'\r\n                else:\r\n                    l = str(w) + ':00'\r\n                labels = labels + [l]\r\n            else:\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    n = math.modf(w1)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                else:\r\n                    n = math.modf(w)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                labels = labels + [l]\r\n    \r\n    # Generate the labels\r\n    labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n    \r\n    stop_times['departure_time'] = stop_times['departure_time']/3600\r\n    \r\n    # Put each trip in the right window\r\n    stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n    stop_times = stop_times.loc[~stop_times.window.isnull()]\r\n    stop_times['window'] = stop_times['window'].astype(str)\r\n    stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n    stop_times['hour'] = stop_times['hour'].astype(str)\r\n    \r\n    stop_times_first = stop_times.loc[stop_times.stop_sequence==1,:]\r\n    \r\n    # direction_id is optional, as it is not needed to determine line frequencies\r\n    # However, if direction_id is NaN, pivot_table will return an empty DataFrame.\r\n    # Therefore, use a sensible default if direction id is not known.\r\n    # Some gtfs feeds only contain direction_id 0, use that as default\r\n    stop_times['direction_id'] = stop_times['direction_id'].fillna(0)\r\n    # Count number of trips per window and hour\r\n    trips_per_window = stop_times_first.pivot_table('trip_id', index=['route_id','direction_id','window'], aggfunc='count').reset_index()\r\n    trips_per_hour = 
stop_times_first.pivot_table('trip_id', index=['route_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n \r\n # Calculate the hourly frequency\r\n trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n \r\n # Get max number of trips and highest frequency\r\n max_trips = trips_per_hour.pivot_table('max_trips', index=['route_id', 'direction_id'], aggfunc='max').reset_index()\r\n max_freq = trips_per_hour.pivot_table('max_frequency', index=['route_id', 'direction_id'], aggfunc='min').reset_index()\r\n \r\n # Calculate frequency per window for each route\r\n trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0]))\r\n end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)))\r\n \r\n trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n line_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n line_frequencies = pd.merge(line_frequencies, max_freq, how = 'left')\r\n \r\n aux = trips.loc[trips.service_id=='1',['route_id', 'direction_id', 'shape_id']].drop_duplicates()\r\n aux = pd.merge(line_frequencies, aux, how='left')\r\n line_frequencies_gdf = pd.merge(aux, shapes, how='left')\r\n # Route name\r\n routes['route_name'] = ''\r\n if routes.route_short_name.isnull().unique()[0]:\r\n routes['route_name'] = routes.route_long_name\r\n elif routes.route_long_name.isnull().unique()[0]: \r\n routes['route_name'] = routes.route_short_name\r\n else:\r\n routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n\r\n line_frequencies_gdf = pd.merge(line_frequencies_gdf, routes[['route_id', 'route_name']])\r\n \r\n gdf = gpd.GeoDataFrame(data=line_frequencies_gdf.drop('geometry', axis=1), geometry=line_frequencies_gdf.geometry)\r\n \r\n gdf.loc[gdf.direction_id == 0, 'direction_id'] = 'Inbound'\r\n gdf.loc[gdf.direction_id == 1, 'direction_id'] = 'Outbound'\r\n \r\n \r\n gdf.rename(columns={\r\n 'direction_id': 'dir_id',\r\n 'max_frequency': 'max_freq',\r\n }, inplace=True)\r\n \r\n gdf = gdf.loc[:,['route_id', 'route_name', 'dir_id', 'window',\r\n 'frequency', 'ntrips',\r\n 'max_freq', 'max_trips', 'geometry']]\r\n gdf = gdf.loc[~gdf.geometry.isnull()]\r\n gdf.sort_values(by='frequency', ascending=False, inplace=True)\r\n \r\n return gdf\r\n \r\ndef segments_freq(segments_gdf, stop_times, routes, cutoffs = [0,6,9,15,19,22,24]):\r\n import warnings\r\n warnings.filterwarnings(\"ignore\")\r\n import math\r\n import pandas as pd\r\n import os\r\n import re\r\n \r\n try:\r\n import geopandas as gpd \r\n except ImportError as e:\r\n os.system('pip install geopandas')\r\n import geopandas as gpd\r\n \r\n # Generate the hours of the day\r\n hours = list(range(25))\r\n hours_labels = [str(hours[i]) + ':00' for i in range(len(hours)-1)]\r\n\r\n # Generate the time windows and cutoffs\r\n if max(cutoffs)<=24: \r\n stop_times_ok = stop_times.loc[stop_times.departure_time < 24*3600]\r\n stop_times_fix = stop_times.loc[stop_times.departure_time >= 24*3600]\r\n stop_times_fix['departure_time'] = [d - 24*3600 for d in stop_times_fix.departure_time]\r\n\r\n stop_times = stop_times_ok.append(stop_times_fix)\r\n labels = []\r\n for w in cutoffs:\r\n if float(w).is_integer():\r\n l = str(w) + ':00'\r\n else:\r\n n = math.modf(w)\r\n l= str(int(n[1])) + ':' + str(int(n[0]*60))\r\n labels = labels + 
[l]\r\n    else:\r\n        labels = []\r\n        for w in cutoffs:\r\n            if float(w).is_integer():\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    l = str(w1) + ':00'\r\n                else:\r\n                    l = str(w) + ':00'\r\n                labels = labels + [l]\r\n            else:\r\n                if w > 24:\r\n                    w1 = w-24\r\n                    n = math.modf(w1)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                else:\r\n                    n = math.modf(w)\r\n                    l = str(int(n[1])) + ':' + str(int(n[0]*60))\r\n                labels = labels + [l]\r\n\r\n    # Generate the labels\r\n    labels = [labels[i] + '-' + labels[i+1] for i in range(0, len(labels)-1)]\r\n\r\n    stop_times['departure_time'] = stop_times['departure_time']/3600\r\n\r\n    # Put each trip in the right window\r\n    stop_times['window'] = pd.cut(stop_times['departure_time'], bins=cutoffs, right=False, labels=labels)\r\n    stop_times = stop_times.loc[~stop_times['window'].isnull()]\r\n    stop_times['window'] = stop_times['window'].astype(str)\r\n\r\n    stop_times['hour'] = pd.cut(stop_times['departure_time'], bins=hours, right=False, labels=hours_labels)\r\n    stop_times['hour'] = stop_times['hour'].astype(str)\r\n\r\n    # Count number of trips per window and hour\r\n\r\n    # direction_id is optional, as it is not needed to determine segment frequencies\r\n    # However, if direction_id is NaN, pivot_table will return an empty DataFrame.\r\n    # Therefore, use a sensible default if direction id is not known.\r\n    # Some gtfs feeds only contain direction_id 0, use that as default\r\n    stop_times['direction_id'] = stop_times['direction_id'].fillna(0)\r\n    trips_per_window = stop_times.pivot_table('trip_id', index=['route_id','stop_id', 'direction_id','window'], aggfunc='count').reset_index()\r\n    trips_per_hour = stop_times.pivot_table('trip_id', index=['route_id','stop_id', 'direction_id','hour'], aggfunc='count').reset_index()\r\n\r\n    # Calculate the hourly frequency\r\n    trips_per_hour.rename(columns={'trip_id':'max_trips'}, inplace=True)\r\n    trips_per_hour['max_frequency'] = (60/trips_per_hour['max_trips']).astype(int)\r\n\r\n    # Get max number of trips and highest frequency\r\n    max_trips = trips_per_hour.pivot_table('max_trips', index=['route_id','stop_id', 'direction_id'], aggfunc='max').reset_index()\r\n    max_freq = trips_per_hour.pivot_table('max_frequency', index=['route_id','stop_id', 'direction_id'], aggfunc='min').reset_index()\r\n\r\n\r\n    # Calculate frequency per window for each route\r\n    trips_per_window.rename(columns={'trip_id':'ntrips'}, inplace=True)\r\n    start_time = trips_per_window['window'].apply(lambda x: int(x.split(':')[0])+(int(x.split(':')[1][:2])/60))\r\n    end_time = trips_per_window['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)) + (int(x.split(':')[2])/60))\r\n\r\n    trips_per_window['frequency'] = ((end_time - start_time)*60 / trips_per_window.ntrips).astype(int)\r\n\r\n    line_frequencies = pd.merge(trips_per_window, max_trips, how = 'left')\r\n    line_frequencies = pd.merge(line_frequencies, max_freq, how = 'left')\r\n    line_frequencies = pd.merge(line_frequencies, \r\n                                segments_gdf.loc[:, ['route_id', 'segment_id', 'start_stop_id', 'start_stop_name', 'end_stop_name','direction_id', 'geometry']],\r\n                                left_on=['route_id','stop_id', 'direction_id'],\r\n                                right_on=['route_id','start_stop_id', 'direction_id'], \r\n                                how='left')\r\n\r\n    line_frequencies.drop_duplicates(subset=['route_id', 'stop_id', 'direction_id', 'window', 'ntrips', 'frequency',\r\n                                             'max_trips', 'max_frequency', 'segment_id', 'start_stop_id',\r\n                                             'start_stop_name', 'end_stop_name'], inplace=True)\r\n\r\n    # Route name\r\n    routes['route_name'] = ''\r\n    if 
routes.route_short_name.isnull().unique()[0]:\r\n        routes['route_name'] = routes.route_long_name\r\n    elif routes.route_long_name.isnull().unique()[0]: \r\n        routes['route_name'] = routes.route_short_name\r\n    else:\r\n        routes['route_name'] = routes.route_short_name + ' ' + routes.route_long_name\r\n    \r\n    line_frequencies = pd.merge(line_frequencies, routes.loc[:,['route_id','route_name']],how='left')\r\n\r\n    # Calculate sum of trips per segment with all lines\r\n    all_lines = line_frequencies.pivot_table(['ntrips'],\r\n                                             index=['segment_id', 'window'],\r\n                                             aggfunc = 'sum'\r\n                                            ).reset_index()\r\n\r\n    # Calculate frequency per window for all routes\r\n    start_time = all_lines['window'].apply(lambda x: int(x.split(':')[0])+(int(x.split(':')[1][:2])/60))\r\n    end_time = all_lines['window'].apply(lambda x: int(re.search('-(.*?):', x).group(1)) + (int(x.split(':')[2])/60))\r\n\r\n    all_lines['frequency'] = ((end_time - start_time)*60 / all_lines.ntrips).astype(int)\r\n\r\n    # Get max number of trips and highest frequency per segment for all routes\r\n    max_trips_all_lines = all_lines.pivot_table('ntrips', index=['segment_id'], aggfunc='max').reset_index()\r\n    max_freq_all_lines = all_lines.pivot_table('frequency', index=['segment_id'], aggfunc='min').reset_index()\r\n\r\n    max_trips_all_lines.rename(columns=dict(ntrips='max_trips'), inplace=True)\r\n    max_freq_all_lines.rename(columns=dict(frequency='max_frequency'), inplace=True)\r\n\r\n    all_lines = pd.merge(all_lines, max_trips_all_lines, how = 'left')\r\n    all_lines = pd.merge(all_lines, max_freq_all_lines, how = 'left')\r\n\r\n    data_all_lines = pd.merge(\r\n        all_lines, \r\n        segments_gdf.drop_duplicates(subset=['segment_id']), \r\n        left_on=['segment_id'],\r\n        right_on = ['segment_id'],\r\n        how='left').reset_index().sort_values(by = ['direction_id','window','stop_sequence'], ascending=True)\r\n\r\n    data_all_lines.drop(['index'], axis=1, inplace=True)\r\n    data_all_lines['route_id'] = 'ALL_LINES'\r\n    data_all_lines['route_name'] = 'All lines'\r\n    data_all_lines['direction_id'] = 'NA'\r\n    data_complete = line_frequencies.append(data_all_lines).reset_index()\r\n\r\n    gdf = gpd.GeoDataFrame(data=data_complete.drop('geometry', axis=1), geometry=data_complete.geometry)\r\n\r\n    gdf.loc[gdf.direction_id == 0, 'direction_id'] = 'Inbound'\r\n    gdf.loc[gdf.direction_id == 1, 'direction_id'] = 'Outbound'\r\n\r\n\r\n    gdf.rename(columns={\r\n        'direction_id': 'dir_id',\r\n        'max_frequency': 'max_freq',\r\n        'start_stop_name': 's_st_name',\r\n        'end_stop_name': 'e_st_name',\r\n        'start_stop_id':'s_st_id'\r\n    }, inplace=True)\r\n\r\n    gdf = gdf.loc[:,['route_id', 'route_name', 'dir_id', 'segment_id', 'window',\r\n                     'frequency', 'ntrips', 's_st_id', 's_st_name', 'e_st_name',\r\n                     'max_freq', 'max_trips', 'geometry']]\r\n    gdf = gdf.loc[~gdf.geometry.isnull()]\r\n    gdf.sort_values(by='frequency', ascending=False, inplace=True)\r\n\r\n    return gdf\r\n    \r\ndef download_osm(gdf):\r\n    # Imports at function level, following the convention used elsewhere in this file\r\n    import requests\r\n    import pandas as pd\r\n    import geopandas as gpd\r\n    from itertools import compress\r\n    from shapely.geometry import Point, LineString\r\n\r\n    # Define the bounding box to query\r\n    bounds = gdf.geometry.total_bounds\r\n\r\n    # Build the query for overpass-api\r\n    overpass_url = \"http://overpass-api.de/api/interpreter\"\r\n
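# NOTE (added): Overpass bounding boxes are (south, west, north, east), which is why total_bounds [minx, miny, maxx, maxy] is passed as bounds[1], bounds[0], bounds[3], bounds[2] below.\r\n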
(way[\"highway\"~\"motorway|trunk|primary|secondary|tertiary|unclassified|residential|service|living_street\"]\r\n ({0}, {1}, {2}, {3}););\r\n out geom;\r\n \"\"\".format(bounds[1], bounds[0], bounds[3], bounds[2])\r\n\r\n # Query overpass-api\r\n response = requests.get(overpass_url, \r\n params={'data': overpass_query})\r\n\r\n # Put the response in a DataFrame\r\n data = response.json()\r\n ways_df = pd.DataFrame(data['elements'])\r\n\r\n # Parse the content in lists\r\n node_ids = []\r\n lat_lon = []\r\n way_ids = []\r\n oneway = []\r\n segment_seq = []\r\n\r\n n_nodes = [len(n) for n in list(ways_df.nodes)]\r\n\r\n [node_ids.extend(n) for n in list(ways_df.nodes)]\r\n [lat_lon.extend(g) for g in list(ways_df.geometry)]\r\n [way_ids.extend([ways_df.loc[i, 'id']]*n_nodes[i]) for i in range(0, len(ways_df))] \r\n [oneway.extend([ways_df.loc[i, 'tags'].get('oneway', '0')]*n_nodes[i]) for i in range(0, len(ways_df))]\r\n [segment_seq.extend(list(range(1, n_nodes[i]+1))) for i in range(0, len(ways_df))] # segment sequence for that way_id\r\n\r\n # Convert to int to save memory\r\n oneway = [1 if s=='yes' else s for s in oneway] \r\n oneway = [0 if s in ['no', '0', 'reversible', '-1'] else s for s in oneway] \r\n oneway = list(map(int, oneway))\r\n\r\n # ------------------------------------------------------------------------------------\r\n # ------------------------------ NODES -----------------------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Parse the json into a dataframe\r\n nodes = pd.DataFrame()\r\n nodes['way_id'] = way_ids\r\n nodes['node_id'] = node_ids\r\n nodes['oneway'] = oneway\r\n nodes['segment_seq'] = segment_seq\r\n\r\n # Get lat,lon values right\r\n lat = [p['lat'] for p in lat_lon]\r\n lon = [p['lon'] for p in lat_lon]\r\n\r\n # Create points\r\n points = [Point(lon[i], lat[i]) for i in range(0, len(lat))]\r\n\r\n # Create GeoDataFrame\r\n nodes_gdf = gpd.GeoDataFrame(data=nodes, geometry = points)\r\n\r\n # ------------------------------------------------------------------------------------\r\n # --------------------------- SEGMENTS -----------------------------------------------\r\n # ------------------------------------------------------------------------------------\r\n\r\n # Define our lists\r\n # Does the node has the same way_id as the next node?\r\n bool_list = nodes['way_id'] == nodes['way_id'].shift(-1)\r\n # Nodes of the segment\r\n segment_nodes = ['{0} - {1}'.format(str(node_ids[i]), str(node_ids[i+1])) for i in range(0,len(node_ids)-1)]\r\n segment_ids = list(range(1, len(segment_nodes)+1))\r\n points_next = points[1:] + [None]\r\n\r\n # Remove the last node of the segment (it is already in the last segment)\r\n segment_nodes = list(compress(segment_nodes, bool_list)) \r\n segment_ids = list(compress(segment_ids, bool_list)) \r\n points = list(compress(points, bool_list)) \r\n points_next = list(compress(points_next, bool_list)) \r\n geometry = [LineString([points[i], points_next[i]]) for i in range(0,len(segment_nodes))]\r\n\r\n # Keep the segments and create the geo data frame\r\n segments = nodes.loc[bool_list, ['way_id', 'oneway', 'segment_seq']]\r\n segments['segment_nodes'] = segment_nodes\r\n segments['osm_segment_id'] = segment_ids\r\n segments_gdf = gpd.GeoDataFrame(data=segments, geometry = geometry)\r\n\r\n # ------------------------------------------------------------------------------------\r\n # --------------------------- ADD OPPOSITE SEGMENTS 
# ------------------------------------------------------------------------------------\r\n    # --------------------------- ADD OPPOSITE SEGMENTS ----------------------------------\r\n    # ------------------------------------------------------------------------------------\r\n\r\n    # Create the opposite segments for two way streets\r\n    opposite = segments_gdf.loc[segments_gdf.oneway == 0].reset_index()\r\n\r\n    opp_nodes = ['{0} - {1}'.format(opposite.loc[i,'segment_nodes'].split(' - ')[1], opposite.loc[i,'segment_nodes'].split(' - ')[0]) for i in range(0,len(opposite))]\r\n    opp_way_id = list(opposite.loc[:,'way_id'])\r\n    opp_osm_segment_id = list(range(segments_gdf.osm_segment_id.max()+1, segments_gdf.osm_segment_id.max() + len(opposite) + 1))\r\n\r\n    opp_geom = opposite.geometry.apply(lambda x: LineString([x.coords[1], x.coords[0]]))\r\n\r\n    opp_df = pd.DataFrame()\r\n    opp_df['way_id'] = opp_way_id\r\n    opp_df['segment_nodes'] = opp_nodes\r\n    opp_df['oneway'] = 0\r\n    opp_df['osm_segment_id'] = opp_osm_segment_id\r\n    opp_df['segment_seq'] = 0\r\n\r\n    opp_gdf = gpd.GeoDataFrame(data=opp_df, geometry=opp_geom)\r\n\r\n    segments_gdf = segments_gdf.append(opp_gdf)\r\n\r\n    # Add \"from\" and \"to\" columns to make the graph generation easier\r\n    segments_gdf['from'] = [int(s.split(' - ')[0]) for s in segments_gdf['segment_nodes']]\r\n    segments_gdf['to'] = [int(s.split(' - ')[1]) for s in segments_gdf['segment_nodes']]\r\n    \r\n    return nodes_gdf, segments_gdf\r\n","sub_path":"gtfs_functions/gtfs_funtions.py","file_name":"gtfs_funtions.py","file_ext":"py","file_size_in_byte":75886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"79412777","text":"# We want to read an xml, change it and write it back as xml\n# use the xml.etree.ElementTree module\n\nfrom xml.etree.ElementTree import parse, Element\ndoc = parse('pred.xml')\nroot = doc.getroot()\nprint(root)\n\n# Remove a few elements\nroot.remove(root.find('sri'))\nroot.remove(root.find('cr'))\nprint(root)\n\n# Insert a new element after the 'nm' element\nnm_index = list(root).index(root.find('nm'))\nprint(root)\n\ne = Element('spam')\ne.text = 'This is a test'\nroot.insert(nm_index + 1, e)\n\n# Write back to a file\ndoc.write('newpred.xml', xml_declaration=True)\n\n","sub_path":"6_data_coding_processing/6_parse_correct_rewrite_xml/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167178843","text":"\"\"\"\n\nRuntime: 52 ms, faster than 18.00% of Python3 online submissions for Valid Parentheses.\nMemory Usage: 14 MB, less than 21.80% of Python3 online submissions for Valid Parentheses.\n\nhttps://leetcode.com/problems/valid-parentheses/discuss/9203/Simple-Python-solution-with-stack\n\n\"\"\"\n\ndef isValidParentheses(s):\n    stack = []\n    pairs = {\")\":\"(\", \"}\":\"{\", \"]\":\"[\"}\n\n    for bracket in s:\n        # open brackets\n        if bracket in pairs.values():\n            stack.append(bracket)\n        # closed brackets\n        elif bracket in pairs.keys():\n            # if we see a closing bracket while the stack is empty, or the top of\n            # the stack does not hold the matching opening bracket, it is invalid\n            if stack == [] or pairs[bracket] != stack.pop():\n                return False\n\n    return stack == []\n","sub_path":"stack/0020. Valid Parentheses.py","file_name":"0020. 
Valid Parentheses.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274206743","text":"'''\nSupport module for F Prime testing\n\nContains support functions and classes used in the F Prime testing\nmodules.\n\n@author: mstarch\n'''\nimport inspect\nimport unittest\n\nclass StreamForker(object):\n '''\n Forks a stream into N number of output streams, enabling one-to-many expansion of stream\n writes. This enables output logging to both the console (sys.stderr) and to a file at the\n same time.\n '''\n def __init__(self, streams=None):\n '''\n Init this object with empty list of streams\n @param streams: list of streams to log to\n '''\n if streams is None:\n streams = []\n self.streams = streams\n def write(self, buffer_data):\n '''\n Write the input buffer to all child buffers\n @param buffer_data: buffer to write to all childs\n '''\n for stream in self.streams:\n stream.write(buffer_data)\n def flush(self):\n '''\n Flushes all streams\n '''\n for stream in self.streams:\n stream.flush()\n\nclass FPrimePrettyPrint(type):\n '''\n Python metaclass --\n This metaclass is used to help in the construction of the FPrimeTestCase\n in order to ensure that the \"setUp\" method of any test case inheriting\n from FPrimeTestCase is properly wrapped in order to ensure that a\n carriage return is printed, making the output look nice.\n '''\n def __new__(mcs, name, bases, attrs):\n '''\n Use this \"new\" function when creating a \"new\" class and make sure\n that there is always a setUp function and that the carriage_return\n flag is properly set.\n '''\n upfn = attrs.get(\"setUp\", None)\n #If setup was defined, then wrap it, otherwise ignore\n if upfn is not None:\n def wrap_up(self):\n '''\n Wraps the setup function to set a variable before running it.\n '''\n self.carriage_return = True\n upfn(self)\n attrs[\"setUp\"] = wrap_up\n return super(FPrimePrettyPrint, mcs).__new__(mcs, name, bases, attrs)\n\nclass TestCaseNotConfigured(Exception):\n '''\n Exception for unconfigured child\n '''\n def __init__(self, name):\n message = \"TestCase '{0}' does not set 'gds_config' before __init__\".format(name)\n super(TestCaseNotConfigured, self).__init__(message)\n\ndef get_verbosity():\n '''\n Searches up the stack for TestProgram, then grabs verbosity\n Borrowed from: stackoverflow.com/questions/13761697/\n '''\n frame = inspect.currentframe()\n while frame:\n slf = frame.f_locals.get('self')\n if isinstance(slf, unittest.TestProgram):\n return slf.verbosity\n frame = frame.f_back\n return 0\n\ndef get_testbed():\n '''\n Searches up the stack for TestProgram, then grabs testbed\n Borrowed from: stackoverflow.com/questions/13761697/\n '''\n frame = inspect.currentframe()\n while frame:\n slf = frame.f_locals.get('self')\n if hasattr(slf, \"testbed\"):\n return slf.testbed\n frame = frame.f_back\n return {\"name\": \"default\"}\n","sub_path":"Gse/src/fprime/gse/testing/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"44910676","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport signal_utils\nimport dip_utils\nimport time\nfrom sklearn.model_selection import ParameterGrid\n\n#Define Network Parameters\n#LR = 1e-4 #learning rate\nMOM = 0.9 #momentum\n#WD = 1 #weight decay for l2-regularization\nZ_NUM = 32 #input seed size\n#NGF = 64 #number of filters 
per layer\nNC = 1 #number of channels\nLENGTH = 1024 #length of output signal\n#NUM_ITER = 3000 #number iterations\n#ALPHA_TV = 1e-1 #TV parameter for net loss\n#ALPHA_L1 = 1e-1\n\nLR = [1e-3, 1e-4, 1e-5]\nWD = [1e-2, 1e-1, 1, 10]\nALPHA_TV = [1e-2, 1e-1, 1]\nNGF = [16, 32, 64]\nNUM_ITER = [1000, 2000, 3000]\n\nparam_grid = {'LR': LR,\n 'WD': WD,\n 'ALPHA_TV': ALPHA_TV,\n 'NGF': NGF,\n 'NUM_ITER': NUM_ITER\n }\n\nindex_grid = {'LR': np.arange(len(LR)),\n 'WD': np.arange(len(WD)),\n 'ALPHA_TV': np.arange(len(ALPHA_TV)),\n 'NGF': np.arange(len(NGF)),\n 'NUM_ITER': np.arange(len(NUM_ITER))\n }\n\n#ParamGrid = ParameterGrid(param_grid)\nIndexGrid = ParameterGrid(index_grid)\n\n#Check for CUDA\nCUDA = torch.cuda.is_available()\nprint(\"GPU is available: \", CUDA)\nif CUDA:\n dtype = torch.cuda.FloatTensor\nelse:\n dtype = torch.FloatTensor\n\n#Define data location and type\ncur_dir = \"/home/sravula/PycharmProjects/1D-DIP/\"\ndata_loc = cur_dir + \"data/AirQuality.csv\"\n\nsample = \"O3-1\"\ntest_type = \"CS\"\n\n#Get the signal\nx0 = signal_utils.get_air_data(loc=data_loc, data=sample, length=LENGTH)\nx = np.zeros((LENGTH, 1))\nx[:,0] = np.squeeze(signal_utils.normalise(x0))\n\nNUM_MEASUREMENTS = [25, 75, 150]\n\nparams_size = (len(NUM_MEASUREMENTS), len(LR), len(WD), len(ALPHA_TV), len(NGF), len(NUM_ITER))\nmse_log = np.zeros(params_size)\n\nnum_instances = 3\n\nstart = time.time()\n\nfor n in range(len(NUM_MEASUREMENTS)):\n\n A = signal_utils.get_A(case=test_type, num_measurements=NUM_MEASUREMENTS[n], original_length=LENGTH)\n\n #imputed_samples = [z for z in range(0, LENGTH) if z not in kept_samples]\n\n y = np.dot(A, x)\n\n for index in IndexGrid:\n\n lr_ind = index['LR']\n wd_ind = index['WD']\n tv_ind = index['ALPHA_TV']\n ngf_ind = index['NGF']\n iter_ind = index['NUM_ITER']\n\n learning_rate = param_grid['LR'][lr_ind]\n weight_decay = param_grid['WD'][wd_ind]\n alpha_tv = param_grid['ALPHA_TV'][tv_ind]\n ngf = param_grid['NGF'][ngf_ind]\n num_iters = param_grid['NUM_ITER'][iter_ind]\n\n params = \"LR: \" + str(learning_rate) + \", WD: \" + str(weight_decay) + \", TV: \" + str(alpha_tv) + \", NGF: \" + str(ngf) + \", ITERS: \" + str(num_iters)\n\n print(\"\\nM=\" + str(NUM_MEASUREMENTS[n]) + \"\\n\" + params)\n\n mse = 0\n\n for t in range(num_instances):\n x_hat = dip_utils.run_TCN(A, y, dtype, ngf, learning_rate, MOM, weight_decay, NC, LENGTH, NUM_MEASUREMENTS[n], CUDA,\n num_iters, alpha_tv=alpha_tv)\n\n mse = mse + np.mean((x_hat.squeeze() - x.squeeze())**2)\n\n mse = mse/num_instances\n\n mse_log[n, lr_ind, wd_ind, tv_ind, ngf_ind, iter_ind] = mse\n\n print(str(mse))\n\nnp.save(\"TCN_Imputation_Gridsearch\", mse_log)\n","sub_path":"param_search.py","file_name":"param_search.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"542214911","text":"from django.db.models import TextChoices\n\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\n\nfrom .exceptions import ClientError\n\n\nclass ChannelsGroups(TextChoices):\n NEW_MESSAGES = 'new_messages'\n\n\nclass ChatConsumer(AsyncJsonWebsocketConsumer):\n \"\"\"\n Consumer that will listen and notify whenever a new message is created.\n\n We are splitting all the type of messages so we can have more flexibility\n in the future\n \"\"\"\n\n groups = [ChannelsGroups.NEW_MESSAGES]\n\n async def connect(self):\n \"\"\"\n Called when a connection to web socket is established. 
If user is\n anonymous, we reject the connection, otherwise we accept it.\n \"\"\"\n self.user = self.scope.get('user', None)\n if self.user.is_anonymous:\n await self.close()\n else:\n await self.accept()\n\n async def new_message(self, event):\n \"\"\"\n Receive the message from the signal that tells us that a new chat\n message has been created, so we can send it back to the clients\n \"\"\"\n # the 'event' will already contain the serialized message data\n await self.send_json(event)\n\n async def new_chat(self, event):\n \"\"\"\n Receive the chat from the signal that tells us that a new chat\n has been created, so we can send it back to the clients\n \"\"\"\n # the 'event' will already contain the serialized chat data\n await self.send_json(event)\n\n async def new_user_added(self, event):\n \"\"\"\n Receive the list of users currently in the chat, alongside the chat id\n \"\"\" \n await self.send_json(event)\n\n\n async def user_typing(self, event):\n \"\"\"\n Receive an object containing the user_id, username, and chat_id of a \n user typing in some chat room \n \"\"\"\n await self.send_json(event)\n\n async def user_stopped_typing(self, event):\n \"\"\"\n Receive an object containing the user_id, username, and chat_id of a \n user that stopped typing in some chat room \n \"\"\"\n await self.send_json(event)\n\n async def receive_json(self, content, **kwargs):\n \n await self.channel_layer.group_send(\n ChannelsGroups.NEW_MESSAGES,\n content\n )","sub_path":"server/server/api/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"314379929","text":"#!python3 \n# Chat Explore: a cmdline program to parse/analyze chats in whatsapp\n\nimport os\nimport re\nimport csv\n\n\n\n\n\nprint(\"Welcome to Chat Explore! \")\n\n# set working dir\nprint('Moving to working dir.. ')\nos.chdir('/Users/poisonarena/Documents/chatData/chatGroup') # Target Chat file\n\n\ndef chatTotalMsgs():\n\tprint('Reading Chat Log.. 
')\n\tchatBaseFile = open('chat.txt', 'r')\n\tlines = chatBaseFile.readlines()\n\tchatBaseFile.close()\n\t\n\tcountAndres = 0\n\tcountSantiago = 0\n\tcountPhilip = 0\n\tcountPaco = 0\n\tcountChundo = 0\n\tcountPauli = 0\n\tcountTonyo = 0\n\tcountHarim = 0\n\tcountJC = 0\n\tcountLuis = 0\n\tcountCato = 0\n\tcountWino = 0\n\tcountOtherAndres = 0\n\tcountAlfonso = 0\n\n\tfor line in lines:\n\t\tline = line.strip()\n\t\tif line.find('andres:')!= -1:\n\t\t\tcountAndres = countAndres + 1\n\t\tif line.find('Santiago:')!= -1:\n\t\t\tcountSantiago = countSantiago + 1\n\t\tif line.find('ᚡ:')!= -1:\n\t\t\tcountPhilip = countPhilip + 1\n\t\tif line.find('Paco:')!= -1:\n\t\t\tcountPaco = countPaco + 1\n\t\tif line.find('Chundo:')!= -1:\n\t\t\tcountChundo = countChundo + 1\n\t\tif line.find('pauli:')!= -1:\n\t\t\tcountPauli = countPauli + 1\n\t\tif line.find('+52 1 55 8534 7800‬:')!= -1:\n\t\t\tcountTonyo = countTonyo + 1\n\t\tif line.find('Harim:')!= -1:\n\t\t\tcountHarim = countHarim + 1\n\t\tif line.find('ritualz:')!= -1:\n\t\t\tcountJC = countJC + 1\n\t\tif line.find('+52 1 55 1333 3487:')!= -1:\n\t\t\tcountLuis = countLuis + 1\n\t\tif line.find('cato:')!= -1:\n\t\t\tcountCato = countCato + 1\n\t\tif line.find('pace man:')!= -1:\n\t\t\tcountWino = countWino + 1\n\t\tif line.find('+33 6 03 33 27 65‬:')!= -1:\n\t\t\tcountOtherAndres = countOtherAndres + 1\n\t\tif line.find('+52 1 55 1850 3431‬:')!= -1:\n\t\t\tcountAlfonso = countAlfonso + 1\n\n\n\n\n\n\n\tprint('Andrics messages = ', countAndres)\n\tprint('Santiagos messages = ', countSantiago)\n\tprint('DK´s messages = ', countPhilip)\n\tprint('Pacos messages = ', countPaco)\n\tprint('Chundo messages = ', countChundo)\n\tprint('Pauli messages = ', countPauli)\n\tprint('Toño messages = ', countTonyo)\n\tprint('Harim messages = ', countHarim)\n\tprint('JC messages = ', countJC)\n\tprint('Luis messages = ', countLuis )\n\tprint('Cato messages = ', countCato)\n\tprint('Wino messages = ', countWino)\n\tprint('Other Andres messages = ', countOtherAndres)\n\tprint('Alfonso messages = ', countAlfonso)\n\nchatTotalMsgs()\n\n","sub_path":"chatExplore.py","file_name":"chatExplore.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"356735779","text":"from flask import session\n\nfrom libraries.couchdb import Couchdb\n\nimport json\n\nclass User(object):\n\t\n\tdef __init__(self):\n\t\tsuper(User, self).__init__()\n\t\tself.db = Couchdb('users')\n\n\tdef getUser(self,email,password):\n\t\tself.db.name = 'users'\n\n\t\tarray = {\n\t\t\t'selector': {\n\t\t\t\t'$and': [\n \t\t\t\t{ 'email': email },\n \t\t\t\t{ 'password': password }\n \t\t\t]\n\t\t\t},\n\t\t\t'fields': ['_id', '_rev','name','email','profilePhoto']\n\t\t}\n\n\t\tdata = self.db.find(array)\n\n\t\treturn data\n\n\tdef getCurrentUser(self):\n\t\tarray = {\n\t\t\t'selector': {\n\t\t\t\t'_id': session['_id']\n\t\t\t},\n\t\t\t'fields': ['_id','_rev','cellphone','email','last_name','name']\n\t\t}\n\n\t\tdata = self.db.find(array)\n\n\t\treturn data\n\n\tdef getCurrentDataUser(self):\n\t\tarray = {\n\t\t\t'selector': {\n\t\t\t\t'_id': session['_id']\n\t\t\t},\n\t\t}\n\n\t\tdata = self.db.find(array)\n\n\t\treturn data\n\n\tdef updateData(self,obj):\n\t\tdata = self.db.create(obj)\n\n\t\treturn data\n\n\tdef getUserById(self,id):\n\t\tarray = {\n\t\t\t'selector': {\n\t\t\t\t'_id': id\n\t\t\t},\n\t\t}\n\n\t\tdata = self.db.find(array)\n\n\t\treturn data\n\n\tdef 
createUser(self,obj):\n\t\tobj['work_experience_years'] = None\n\t\tobj['city'] = None\n\t\tobj['marital_status'] = None\n\t\tobj['profession'] = None\n\t\tobj['profilePhoto'] = None\n\t\tobj['phone'] = None\n\t\tobj['doc_type'] = 'general'\n\t\tobj['last_name'] = None\n\t\tobj['address'] = None\n\t\tobj['nationality'] = None\n\n\t\tdata = self.db.create(obj)\n\n\t\treturn data","sub_path":"models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546710558","text":"import tensorflow as tf\nimport tqdm\nimport numpy as np\nfrom reading_tfrecord import input_func\nimport matplotlib.pyplot as plt\n\ndef model(in_data,keep_prob):\n \n conv1_filter = tf.Variable(tf.truncated_normal(shape=[4, 4, 3, 64], mean=0, stddev=0.08))\n conv2_filter = tf.Variable(tf.truncated_normal(shape=[4, 4, 64, 128], mean=0, stddev=0.08))\n conv3_filter = tf.Variable(tf.truncated_normal(shape=[4, 4, 128, 256], mean=0, stddev=0.08))\n conv4_filter = tf.Variable(tf.truncated_normal(shape=[3, 3, 256, 256], mean=0, stddev=0.08))\n x=tf.layers.batch_normalization(in_data)\n \n # 1, 2\n conv1 = tf.nn.conv2d(x, conv1_filter, strides=[1,1,1,1], padding='SAME')\n conv1 = tf.nn.relu(conv1)\n conv1_pool = tf.nn.max_pool(conv1, ksize=[1,2,2,1], strides=[1,1,1,1], padding='SAME')\n conv1_bn = tf.layers.batch_normalization(conv1_pool)\n \n print(conv1_bn.shape)\n\n '''\n # 3, 4\n conv2 = tf.nn.conv2d(conv1_bn, conv2_filter, strides=[1,1,1,1], padding='SAME')\n conv2 = tf.nn.relu(conv2)\n conv2_pool = tf.nn.max_pool(conv2, ksize=[1,2,2,1], strides=[1,1,1,1], padding='SAME') \n conv2_bn = tf.layers.batch_normalization(conv2_pool)\n \n \n \n # 5, 6\n conv3 = tf.nn.conv2d(conv2_bn, conv3_filter, strides=[1,1,1,1], padding='SAME')\n conv3 = tf.nn.relu(conv3)\n conv3_pool = tf.nn.max_pool(conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME') \n conv3_bn = tf.layers.batch_normalization(conv3_pool)\n \n \n # 7, 8\n conv4 = tf.nn.conv2d(conv3_bn, conv4_filter, strides=[1,1,1,1], padding='SAME')\n conv4 = tf.nn.relu(conv4)\n conv4_pool = tf.nn.max_pool(conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')\n conv4_bn = tf.layers.batch_normalization(conv4_pool)\n print(conv4_bn.shape)\n '''\n # 9\n flat = tf.contrib.layers.flatten(conv1_bn) \n \n # 10\n full1 = tf.contrib.layers.fully_connected(inputs=flat, num_outputs=128, activation_fn=tf.nn.relu)\n full1 = tf.layers.batch_normalization(full1)\n full1 = tf.nn.dropout(full1, keep_prob)\n \n # 11\n full2 = tf.contrib.layers.fully_connected(inputs=full1, num_outputs=256, activation_fn=tf.nn.relu)\n full2 = tf.layers.batch_normalization(full2)\n full2 = tf.nn.dropout(full2, keep_prob)\n \n # 12\n full3 = tf.contrib.layers.fully_connected(inputs=full2, num_outputs=512, activation_fn=tf.nn.relu)\n full3 = tf.layers.batch_normalization(full3) \n full3 = tf.nn.dropout(full3, keep_prob) \n \n # 13\n full4 = tf.contrib.layers.fully_connected(inputs=full3, num_outputs=512, activation_fn=tf.nn.relu)\n full4 = tf.layers.batch_normalization(full4)\n full4 = tf.nn.dropout(full4, keep_prob) \n \n # 14\n out = tf.contrib.layers.fully_connected(inputs=full4, num_outputs=10, activation_fn=None)\n return out\n\ndef train_model(data,epoch=30):\n x_place = tf.placeholder(tf.float32,[None,28,28,3],name='x_place')\n y_place = tf.placeholder(tf.int32,[None],name='y_place')\n infer_data = tf.data.Dataset.from_tensor_slices((x_place,y_place))\n infer_data = infer_data.batch(100)\n 
Iterator = tf.data.Iterator.from_structure(data.output_types,data.output_shapes)\n    next_image,next_label = Iterator.get_next()\n    Y = tf.one_hot(next_label,10)\n    Y = tf.cast(Y,tf.float32) # softmax_cross_entropy_with_logits_v2 expects float labels, so cast to float32 rather than int32\n    logits = model(next_image,0.9)\n\n    train_op = Iterator.make_initializer(data,name='train_op')\n    test_op = Iterator.make_initializer(infer_data,name='test_op')\n\n    with tf.name_scope('loss'):\n        loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,labels=Y),name='loss')\n    optimizer = tf.train.AdamOptimizer(learning_rate=0.005).minimize(loss)\n\n    prediction = tf.argmax(logits,1,name='pred')\n    equal = tf.equal(prediction,tf.argmax(Y,1))\n    accuracy = tf.reduce_mean(tf.cast(equal,tf.float32))\n\n    tf.summary.scalar('loss',loss)\n    tf.summary.scalar('accuracy',accuracy)\n\n    merge = tf.summary.merge_all()\n\n    saver = tf.train.Saver()\n    init = tf.global_variables_initializer()\n\n    j=0\n    with tf.Session() as sess:\n        sess.run(init)\n        writer = tf.summary.FileWriter('C:/Users/default.DESKTOP-43PHGMT/Desktop/projects/Number Recognition/trained_graph',sess.graph)\n        for i in range(epoch):\n            sess.run(train_op)\n            if(i>=2):\n                saver.save(sess,\"C:/Users/default.DESKTOP-43PHGMT/Desktop/projects/Number Recognition/trained_graph/number_classifier\"+str(j))\n            while(True):\n                try:\n                    j+=1\n                    if j%100!=0:\n                        summ,_ = sess.run([merge,optimizer])\n                        writer.add_summary(summ,j)\n                    else:\n                        l,_,acc = sess.run([loss,optimizer,accuracy])\n                        if(j==1 or j%20==0):\n                            print(\"iters: {}, loss: {:.10f}, training accuracy: {:.2f}\".format(j, l, acc*100))\n                except tf.errors.OutOfRangeError:\n                    break\n\n        saver.save(sess,\"C:/Users/default.DESKTOP-43PHGMT/Desktop/projects/Number Recognition/trained_graph/number_classifier\"+str(j))\n\n\ndata = input_func('C:/Users/default.DESKTOP-43PHGMT/Desktop/projects/Number Recognition/train.tfrecords')\ntrain_model(data)\n","sub_path":"number_classifier.py","file_name":"number_classifier.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"219626453","text":"class Solution:\n    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n        if not candidates:\n            return []\n        \n        def helper(candidates, target, sol_list, current_list, last):\n            if target == 0:\n                sol_list.append(current_list[:])\n                return True\n            if target < 0:\n                return False\n            for idx in range(last, len(candidates)):\n                value = candidates[idx]\n                current_list.append(value)\n                if not helper(candidates, target-value, sol_list, current_list, idx):\n                    current_list.pop()\n                    break\n                current_list.pop()\n            return True\n        \n        sol_list = []\n        candidates.sort()\n        helper(candidates, target, sol_list, [], 0)\n        return sol_list\n","sub_path":"leetcode/39_combination_sum.py","file_name":"39_combination_sum.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"526496572","text":"#1.)Working with text using BAG OF WORDS model\n#link-https://andhint.github.io/machine-learning/nlp/Feature-Extraction-From-Text/\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pandas as pd\nvect = CountVectorizer()\nvect.fit(list_of_messages)\ndtm = vect.transform(list_of_messages)\npd.DataFrame(dtm.toarray(), columns=vect.get_feature_names())\n\n#2.)Working with text using Term Frequency Inverse document frequency model\n# args-max_features , min_df ,max_df ,stop_words 
\n#link-https://andhint.github.io/machine-learning/nlp/Feature-Extraction-From-Text/\nfrom sklearn.feature_extraction.text import TfidfVectorizer\ndef createDTM(list_of_messages):\n    vect = TfidfVectorizer()\n    dtm = vect.fit_transform(list_of_messages) # create DTM\n    # create pandas dataframe of DTM\n    return pd.DataFrame(dtm.toarray(), columns=vect.get_feature_names())\n\n#3.)Working with text using Word2Vec model\n#link-https://taylorwhitten.github.io/blog/word2vec\nimport gensim\nfrom gensim.models import Word2Vec\nfrom gensim.models import Phrases\nmodel = Word2Vec(list_of_sentences_containing_list_of_tokens, workers=num_workers, \\\n            size=num_features, min_count = min_word_count, \\\n            window = context, sample = downsampling)\nmodel.init_sims(replace=True)\nmodel_name = \"nytimes_oped\"\nmodel.save(model_name)\nnew_model = gensim.models.Word2Vec.load('nytimes_oped')\n\n#Adding support for bigram and trigram\nbigram = gensim.models.Phrases(sentences)\ntrigram = Phrases(bigram[sentence_stream])\nmodel = Word2Vec(trigram[bigram[sentence_stream]], workers=num_workers, \\\n            size=num_features, min_count = min_word_count, \\\n            window = context, sample = downsampling)\n\n#4.)Working with text using Word2Vec model and using that for text classification by building a doc vector\n#http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/\n\nimport gensim\nimport numpy as np\nfrom collections import defaultdict\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import ExtraTreesClassifier\n# let X be a list of tokenized texts (i.e. list of lists of tokens)\nmodel = gensim.models.Word2Vec(X, size=100)\nw2v = dict(zip(model.wv.index2word, model.wv.syn0))\n\nclass TfidfEmbeddingVectorizer(object):\n    def __init__(self, word2vec):\n        self.word2vec = word2vec\n        self.word2weight = None\n        self.dim = len(next(iter(word2vec.values()))) # works on both Python 2 and 3 dicts, unlike itervalues().next()\n\n    def fit(self, X, y):\n        tfidf = TfidfVectorizer(analyzer=lambda x: x)\n        tfidf.fit(X)\n        # if a word was never seen - it must be at least as infrequent\n        # as any of the known words - so the default idf is the max of\n        # known idf's\n        max_idf = max(tfidf.idf_)\n        self.word2weight = defaultdict(\n            lambda: max_idf,\n            [(w, tfidf.idf_[i]) for w, i in tfidf.vocabulary_.items()])\n\n        return self\n\n    def transform(self, X):\n        return np.array([\n                np.mean([self.word2vec[w] * self.word2weight[w]\n                         for w in words if w in self.word2vec] or\n                        [np.zeros(self.dim)], axis=0)\n                for words in X\n            ])\n\n\netree_w2v_tfidf = Pipeline([\n    (\"word2vec vectorizer\", TfidfEmbeddingVectorizer(w2v)),\n    (\"extra trees\", ExtraTreesClassifier(n_estimators=200))])\n","sub_path":"text_manipulation.py","file_name":"text_manipulation.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"410049357","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals\r\nfrom builtins import *\r\nfrom future.utils import raise_from\r\n\r\nimport types\r\nimport traceback\r\ntry:\r\n    from html.parser import HTMLParser\r\nexcept:\r\n    from HTMLParser import HTMLParser\r\n\r\nfrom ttmake.private.exceptionclasses import CritError\r\n\r\n\r\ndef textfill(**kwargs):\r\n    '''\r\n    #################################################################\r\n    # textfill_readme.txt - Help/Documentation for textfill.py\r\n    #################################################################\r\n\r\n    Description:\r\n    textfill.py is a python module designed to copy sections of log files produced\r\n    by Stata to LyX files.\r\n\r\n    Usage:\r\n    Textfill takes as input log files produced by Stata (the input files) and a 
LyX\r\n file with labels indicating where logs should be inserted (the template file),\r\n and produces a LyX file (the output file) which includes sections of the input\r\n files (as indicated by tags inside the input files) in the locations indicated\r\n by the labels in the template file.\r\n\r\n Textfill must first be imported to make.py. This is typically achieved by\r\n including the following lines:\r\n\r\n ```\r\n from ttmake.textfill import textfill\r\n ```\r\n\r\n Once the module has been imported, the syntax used to call textfill is as follows:\r\n\r\n ```\r\n textfill( input = 'input_file(s)', template = 'template_file', output = 'output_file',\r\n [size = 'size'], [remove_echoes = 'True/False'] )\r\n ```\r\n\r\n The argument 'input' is a list of the text files containing the stata logs to be\r\n copied to the LyX tables. If there are multiple input text files, they are listed as:\r\n input = 'input_file_1 input_file_2'. The argument 'template' is the user written LyX\r\n file which contains the labels which will be replaced with sections of the log files.\r\n The argument 'output' is the name of the filled LyX file to be produced. Note that this\r\n file is created by textfill.py, and should not be edited manually by the user.\r\n\r\n There are two optional arguments: 'size' and 'remove_echoes'. The argument 'size'\r\n determines the size of inserted text relative to body text in the output file.\r\n It accepts LaTeX font size arguments, and defaults to same size as body. The argument\r\n 'remove_echoes' determines whether or not Stata command echoes are removed from the\r\n copied log. It defaults to false.\r\n\r\n\r\n ###########################\r\n Input File Format:\r\n ###########################\r\n\r\n Input files for textfill.py are log files produced by Stata. Sections of input files\r\n to be inserted by textfill are indicated by tags printed by the stata command\r\n 'insert_tags', which is defined by a ttecon ado file in ttecon/ttstata/misc/.\r\n\r\n In the stata do file which produces the input logs, the user begins a tagged section\r\n with the command:\r\n insert_tag tag_name, open\r\n\r\n This will insert the following line, which indicates the beginning of a tagged section\r\n of the log, into the log file:\r\n ``\r\n\r\n The user should now add lines to the do file which print the output they want to add to\r\n the tagged section, followed by the line:\r\n insert_tag tag_name, close\r\n\r\n This inserts the following line to the log file, indicating the end of the tagged section:\r\n ``\r\n\r\n\r\n ###########################\r\n Template LyX Format:\r\n ###########################\r\n\r\n The LyX template file contains labels which determine where the tagged sections of the\r\n input files are inserted. To insert a log section tagged as 'tag_name', in a particular\r\n place in the LyX file, the user inserts a label object with the value 'text:tag_name'\r\n inside a 'Text' custom inset. The 'text:' part of the label is mandatory. When textfill\r\n is run, the tagged section of the input files will be inserted as text input at the\r\n location of corresponding label in the LyX file.\r\n\r\n Note that the 'Text' custom inset object is available from 'Insert > Custom Insets' when\r\n Lyx had been reconfigured with the custom module text.module. 
This module is available on\r\n the repo at /admin/Computer Build Sheet/, and can be installed according to the instructions\r\n in /admin/Computer Build Sheet/standard_build.pdf.\r\n\r\n Note that label/tag names cannot be duplicated. For a single template file, each block of\r\n text to be inserted must have a unique label, and there must be one, and only one, section\r\n of the input files tagged with that same label. Having multiple sections of the input files\r\n or multiple labels in the template file with the same name will cause errors.\r\n\r\n Note also that when a LyX file with a 'text:' label is opened in LyX, or when textfill.py is\r\n run on it, LyX may issue a warning:\r\n \"The module text has been requested by this document but has not been found...\"\r\n\r\n This warning means that the custom module text.module has not been installed - see above.\r\n\r\n\r\n #####################\r\n # Example\r\n #####################\r\n\r\n The following is an example of code, which could appear in a Stata do file, used to produce\r\n input for textfill.\r\n ```\r\n insert_tag example_log, open\r\n display \"test\"\r\n insert_tag example_log, close\r\n ```\r\n\r\n Suppose output from Stata is being logged in stata.log. This code adds the following lines\r\n to stata.log:\r\n\r\n ```\r\n . insert_tag example_log, open\r\n \r\n\r\n . display \"test\"\r\n test\r\n\r\n . insert_tag example_log, close\r\n \r\n ```\r\n\r\n Suppose we have a LyX file, template.lyx, which contains a label with the value\r\n \"text:example_log\" (without the \"\"). The following textfill command,\r\n `textfill( input = 'stata.log', template = 'template.lyx', output = 'output.lyx' )`\r\n\r\n would produce a file, output.lyx, identical to template.lyx, but with the label\r\n \"text:example.log\" replaced with the verbatim input:\r\n\r\n ```\r\n . display \"test\"\r\n test\r\n ```\r\n\r\n The following command,\r\n `textfill( input = 'stata.log', template = 'template.lyx',\r\n output = 'output.lyx', remove_echoes = True )`\r\n\r\n would produce output.lyx replacing the label with the verbatim input (removing Stata command echoes):\r\n\r\n\r\n `test`\r\n\r\n\r\n ######################\r\n # Error Logging\r\n ######################\r\n\r\n If an error occurs during the call to text, it will be displayed in the command window.\r\n When make.py finishes, the user will be able to scroll up through the output and examine\r\n any error messages. 
Error messages, which include a description of the error type\r\n and a traceback to the line of code where the error occurred, can also be returned as a\r\n string object using the following syntax:\r\n\r\n ```\r\n exitmessage = textfill( input = 'input_file(s)', template = 'template_file', output = 'output_file',\r\n [size = 'size'], [remove_echoes = 'True/False'] )\r\n ```\r\n\r\n Lines can then be added to make.py to output this string to a log file using standard\r\n Python and built in ttmake commands.\r\n '''\r\n try:\r\n args = parse_arguments(kwargs)\r\n text = parse_text(args)\r\n insert_text(args, text)\r\n exitmessage = args['template'] + ' filled successfully by textfill'\r\n print(exitmessage)\r\n return exitmessage\r\n\r\n except:\r\n print('Error Found')\r\n exitmessage = traceback.format_exc()\r\n print(exitmessage)\r\n return exitmessage\r\n\r\n\r\ndef parse_arguments(kwargs):\r\n args = dict()\r\n if 'input' in kwargs.keys():\r\n input_list = kwargs['input'].split()\r\n args['input'] = input_list\r\n if 'template' in kwargs.keys():\r\n args['template'] = kwargs['template']\r\n if 'output' in kwargs.keys():\r\n args['output'] = kwargs['output']\r\n if 'remove_echoes' in kwargs.keys():\r\n args['remove_echoes'] = kwargs['remove_echoes']\r\n else:\r\n args['remove_echoes'] = False\r\n if 'size' in kwargs.keys():\r\n args['size'] = kwargs['size']\r\n else:\r\n args['size'] = 'Default'\r\n if 'prefix' in kwargs.keys():\r\n args['prefix'] = kwargs['prefix'] + \"_\"\r\n else:\r\n args['prefix'] = 'textfill_'\r\n\r\n return args\r\n\r\n\r\ndef parse_text(args):\r\n text = read_text(args['input'], args['prefix'])\r\n text = clean_text(text, args['remove_echoes'])\r\n\r\n return text\r\n\r\n\r\ndef read_text(input, prefix):\r\n data = ''\r\n if isinstance(input, types.StringTypes):\r\n input = [input]\r\n for file in input:\r\n data += open(file, 'rU').read()\r\n text = text_parser(prefix)\r\n text.feed(data)\r\n text.close()\r\n\r\n return text\r\n\r\n\r\nclass text_parser(HTMLParser):\r\n def __init__(self, prefix):\r\n HTMLParser.__init__(self)\r\n self.recording = False\r\n self.results = {}\r\n self.open = []\r\n self.closed = []\r\n self.prefix = prefix\r\n\r\n def handle_starttag(self, tag, attrs):\r\n if tag.startswith(self.prefix):\r\n tag_name = tag.replace(self.prefix, '', 1)\r\n self.recording = True\r\n self.results[tag_name] = ''\r\n self.open.append(tag_name)\r\n\r\n def handle_data(self, data):\r\n if self.recording:\r\n self.results[self.open[-1]] += data\r\n\r\n def handle_endtag(self, tag):\r\n if tag.startswith(self.prefix):\r\n tag_name = tag.replace(self.prefix, '', 1)\r\n self.open.remove(tag_name)\r\n self.closed.append(tag_name)\r\n if not self.open:\r\n self.recording = False\r\n\r\n def close(self):\r\n for tag in self.results.keys():\r\n if tag not in self.closed:\r\n raise_from(CritError('Tag %s is not closed' % tag), None)\r\n\r\n\r\ndef clean_text(text, remove_echoes):\r\n for key in text.results:\r\n data = text.results[key].split('\\n')\r\n if remove_echoes:\r\n data = filter(lambda x: not x.startswith('.'), data)\r\n else:\r\n data = filter(lambda x: not x.startswith('. 
insert_tag'), data)\r\n data = remove_trailing_leading_blanklines(data)\r\n text.results[key] = '\\n'.join(data)\r\n\r\n return text\r\n\r\n\r\ndef remove_trailing_leading_blanklines(list):\r\n while list and not list[0]:\r\n del list[0]\r\n while list and not list[-1]:\r\n del list[-1]\r\n\r\n return list\r\n\r\n\r\ndef insert_text(args, text):\r\n lyx_text = open(args['template'], 'rU').readlines()\r\n # Loop over (expanding) raw LyX text\r\n n = 0\r\n loop = True\r\n while loop:\r\n n += 1\r\n if n < len(lyx_text):\r\n if (lyx_text[n].startswith('name \"text:')):\r\n tag = lyx_text[n].replace('name \"text:', '', 1).rstrip('\"\\n').lower()\r\n if tag in text.results:\r\n # Insert text after preceding layout is closed\r\n insert_now = False\r\n i = n\r\n while not insert_now:\r\n i += 1\r\n if lyx_text[i] == '\\\\end_layout\\n':\r\n insert_now = True\r\n\r\n # Insert text\r\n for key in text.results:\r\n if tag == key:\r\n lyx_code = write_data_to_lyx(text.results[key], args['size'])\r\n lyx_text.insert(i + 1, lyx_code)\r\n else:\r\n loop = False\r\n\r\n outfile = open(args['output'], 'wb')\r\n outfile.write(''.join(lyx_text))\r\n outfile.close()\r\n\r\n return lyx_text\r\n\r\n\r\ndef write_data_to_lyx(data, size):\r\n data_list = data.split('\\n')\r\n linewrap_beg = '\\\\begin_layout Plain Layout\\n'\r\n linewrap_end = '\\\\end_layout\\n'\r\n if size != 'Default':\r\n size_line = '\\\\backslash\\n' + size + '\\n' + linewrap_end + linewrap_beg\r\n else:\r\n size_line = ''\r\n\r\n preamble = '\\\\begin_layout Plain Layout\\n' \\\r\n '\\\\begin_inset ERT status collapsed\\n' \\\r\n '\\\\begin_layout Plain Layout\\n' + size_line + \\\r\n '\\\\backslash\\nbegin{verbatim}\\n' \\\r\n '\\end_layout'\r\n postamble = '\\\\begin_layout Plain Layout\\n' \\\r\n '\\\\backslash\\nend{verbatim}\\n' \\\r\n '\\end_layout\\n' \\\r\n '\\end_inset\\n' \\\r\n '\\end_layout'\r\n\r\n lyx_code = preamble\r\n for line in data_list:\r\n lyx_code += linewrap_beg + line + linewrap_end\r\n lyx_code += postamble\r\n\r\n return lyx_code\r\n","sub_path":"lib/ttmake/textfill.py","file_name":"textfill.py","file_ext":"py","file_size_in_byte":12614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"426617457","text":"import random\r\nn=15\r\ndef functions():\r\n card = {\"B\":[], \"I\":[], \"N\":[], \"G\":[] , \"O\":[]}\r\n lower = 1\r\n upper = 1 + 15\r\n for l in card:\r\n card[l] = random.sample(range(lower,upper), 5)\r\n lower = lower + n\r\n upper = upper + n\r\n return card\r\n\r\n\r\ndef displayCard(card):\r\n print(\"B I N G O\")\r\n for letter in card:\r\n for number in card[letter]:\r\n print(number, end = \"\\t\")\r\n print(\"\\t\")\r\n print(\"\\t\")\r\n\r\ncard = functions()\r\ndisplayCard(card)","sub_path":"138.py","file_name":"138.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"571522132","text":"# Node is researcher\n# Edge is research keyword\n\nimport networkx as nx\nimport pandas as pd\nimport numpy as np\nimport math\nimport itertools\n\ndf = pd.read_csv(\"./20160318.csv\", encoding=\"SHIFT-JIS\", index_col=\"No\")\n\nG = nx.Graph()\n\n# Nodes\n# Use id & keywords\n# Make keyword map\n# nodes = ids\nword_to_idxs = {}\nids = []\nfor idx, researcher in df.iterrows():\n id = idx\n ids.append(id)\n keyword_1 = researcher[\"keyword_1\"]\n keyword_2 = researcher[\"keyword_2\"]\n keyword_3 = researcher[\"keyword_3\"]\n keyword_4 = researcher[\"keyword_4\"]\n 
keyword_5 = researcher[\"keyword_5\"]\n    # NaN != NaN, so the kw == kw check skips missing keyword values\n    for kw in (keyword_1, keyword_2, keyword_3, keyword_4, keyword_5):\n        if kw == kw:\n            if kw not in word_to_idxs:\n                word_to_idxs[kw] = []\n            word_to_idxs[kw].append(id)\nnodes = ids\n\n\n# Edges\n# Use keyword map\n# edges = [(),()]\nedges = []\nfor clique in word_to_idxs.values():\n    combi = itertools.combinations(clique, 2)\n    edges.extend(combi)\n\n\nG.add_nodes_from(nodes)\nG.add_edges_from(edges)\n\nprint(\"nodes: \"+str(G.number_of_nodes()))\nprint(\"edges: \"+str(G.number_of_edges()))\n\noutput_path = \"./node-researcher_edge-keyword.graphml\"\nnx.write_graphml(G, output_path)\n\n","sub_path":"atlas/assets/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"165620707","text":"# -*- coding:utf-8 -*-\n\nclass Stack:\n    def __init__(self):\n        self.stack = []\n\n    def push(self, ele):\n        self.stack.append(ele)\n\n    def pop(self):\n        return self.stack.pop()\n\n    def get_top(self):\n        if len(self.stack) > 0:\n            return self.stack[-1]\n        else:\n            return None\n\n\nif __name__ == '__main__':\n    stack = Stack()\n    stack.push(1)\n    stack.push(2)\n    stack.push(3)\n    print(stack.pop())\n    print(stack)\n    print(stack.pop())\n","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"313342575","text":"# -*- encoding: utf-8 -*-\nfrom __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\nimport TensorflowUtils as utils # utility module that includes the batch-norm helpers\nimport read_MITSceneParsingData as scene_parsing # input-format conversion script read_MITSceneParsingData.py\nimport readNpData # image preprocessing, includes resize\nimport BatchDatsetReader1 as dataset # reads the .png files\nfrom six.moves import xrange\nimport tifffile as tiff\nimport scipy.misc as misc\nimport cv2\nimport misc\nimport datetime\nimport skimage, skimage.morphology, skimage.data, skimage.measure, skimage.segmentation\nimport crf_c as crf\nimport os\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\nREGULARIZATION_RATE = 0.0001\n\n# initialize the flags (name, default value, description string)\nFLAGS = tf.flags.FLAGS\ntf.flags.DEFINE_integer(\"batch_size\", \"8\", \"batch size for training\") # images per batch\ntf.flags.DEFINE_string(\"logs_dir\", \"logs_deep1102/\", \"path to logs directory\")\ntf.flags.DEFINE_string(\"data_dir\", \"enhance10.30/\", \"path to dataset\")\ntf.flags.DEFINE_float(\"learning_rate\", \"1e-4\", \"Learning rate for Adam Optimizer\")\ntf.flags.DEFINE_string(\"model_dir\", \"Model_zoo/\", \"Path to vgg model mat\")\ntf.flags.DEFINE_bool('debug', \"False\", \"Debug mode: True/ False\")\ntf.flags.DEFINE_string('mode', \"test\", \"Mode train/ test/ visualize\") # visualize\n\nFILE_new_2017 = '/home/lenovo/2Tdisk/Wkyao/_/quickbird2017_preliminary_2.tif'\nnew_2017 = tiff.imread(FILE_new_2017).transpose([1, 2, 0])\nFILE_new_2015 = '/home/lenovo/2Tdisk/Wkyao/_/quickbird2015_preliminary_2.tif'\nnew_2015 = 
tiff.imread(FILE_new_2015).transpose([1, 2, 0])\n\nim_2017 = cv2.imread('/home/lenovo/2Tdisk/Wkyao/_/20170905_preliminary/preliminary/2017.jpg')\nprint(new_2017.shape)\nFILE_2017 = '/home/lenovo/2Tdisk/Wkyao/_/20170905_preliminary/preliminary/quickbird2017.tif'\nim_2017tif = tiff.imread(FILE_2017).transpose([1, 2, 0]) # move the channel axis last: the first two dims of the new im_2017 are image height/width, the third is 4 (four bands)\nFILE_2015 = '/home/lenovo/2Tdisk/Wkyao/_/20170905_preliminary/preliminary/quickbird2015.tif'\nim_2015 = tiff.imread(FILE_2015).transpose([1, 2, 0])\nFILE_result = '/home/lenovo/2Tdisk/Wkyao/_/2017/deep_10172216.tif'\nim_result = tiff.imread(FILE_result)\n\nMODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat' # the pretrained VGG19 model\n\nMAX_ITERATION = int(2e4 + 1)\nNUM_OF_CLASSESS = 2 # background and new buildings\nIMAGE_RESIZE = 256\n\n\ndef vgg_net(weights, image):\n    layers = (\n        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',\n\n        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',\n\n        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',\n        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',\n\n        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',\n        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',\n\n        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',\n        'relu5_3', 'conv5_4', 'relu5_4'\n    )\n\n    net = {}\n    current = image\n    for i, name in enumerate(layers):\n        kind = name[:4] # layer kind: conv, relu or pool\n        if kind == 'conv':\n            kernels, bias = weights[i][0][0][0][0]\n            kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + \"_w\")\n            bias = utils.get_variable(bias.reshape(-1), name=name + \"_b\")\n\n            if name[4:5] == '5': # from conv5 on, use atrous (dilated) convolution\n                current = utils.atrous_conv2d_basic(current, kernels, bias, 2) # rate=2, i.e. pad=2\n                current = utils.batch_norm_layer(current, FLAGS.mode, scope_bn=name) # batch normalization\n            else: # conv1-4\n                current = utils.conv2d_basic(current, kernels, bias)\n                current = utils.batch_norm_layer(current, FLAGS.mode, scope_bn=name) # batch normalization\n        elif kind == 'relu':\n            current = tf.nn.relu(current, name=name)\n            if FLAGS.debug:\n                utils.add_activation_summary(current)\n        elif kind == 'pool':\n            if name[4:5] == '4':\n                current = utils.max_pool_1x1(current)\n            else:\n                current = utils.max_pool_3x3(current)\n        net[name] = current\n\n    return net\n\n\ndef inference(image, keep_prob, mean): # keep_prob is the dropout placeholder\n    print(\"setting up vgg initialized conv layers ...\")\n    model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL) # download the pretrained VGG model\n    mean_pixel = mean\n    weights = np.squeeze(model_data['layers'])\n\n    processed_image = utils.process_image(image, mean_pixel) # subtract the mean pixel value from the image\n    with tf.variable_scope(\"inference\"):\n        image_net = vgg_net(weights, processed_image)\n        conv_final_layer = image_net[\"conv5_4\"]\n\n        pool5 = utils.max_pool_1x1(conv_final_layer)\n\n        # W6~W8 could all be L2-regularized, since they act as fully connected layers\n        # 7x7 was changed to 3x3 and 4096 to 1024; the features may be insufficient?\n\n        # the newly added W6-W8 and b6-b8 come with their own initialization\n        W6 = utils.weight_variable([3, 3, 512, 1024], name=\"W6\")\n        b6 = utils.bias_variable([1024], name=\"b6\")\n\n        # data_format = \"channels_last\" is the default\n        # atrous convolutions with different rates\n        Fc6_1 = utils.atrous_conv2d_basic(pool5, W6, b6, 6)\n        Fc6_2 = utils.atrous_conv2d_basic(pool5, W6, b6, 12)\n        Fc6_3 = utils.atrous_conv2d_basic(pool5, W6, b6, 18)\n        Fc6_4 = utils.atrous_conv2d_basic(pool5, W6, b6, 24)\n\n        Bn6_1 = utils.batch_norm_layer(Fc6_1, FLAGS.mode, scope_bn='Bn') # batch norm must come before relu\n        Bn6_2 = utils.batch_norm_layer(Fc6_2, FLAGS.mode, scope_bn='Bn')\n        Bn6_3 = utils.batch_norm_layer(Fc6_3, FLAGS.mode, scope_bn='Bn')\n        Bn6_4 = utils.batch_norm_layer(Fc6_4, FLAGS.mode, 
scope_bn='Bn')\n\n        relu6_1 = tf.nn.relu(Bn6_1, name=\"relu6_1\")\n        relu6_2 = tf.nn.relu(Bn6_2, name=\"relu6_2\")\n        relu6_3 = tf.nn.relu(Bn6_3, name=\"relu6_3\")\n        relu6_4 = tf.nn.relu(Bn6_4, name=\"relu6_4\")\n        if FLAGS.debug:\n            utils.add_activation_summary(relu6_1)\n            utils.add_activation_summary(relu6_2)\n            utils.add_activation_summary(relu6_3)\n            utils.add_activation_summary(relu6_4)\n\n        relu_dropout6_1 = tf.nn.dropout(relu6_1, keep_prob=keep_prob)\n        relu_dropout6_2 = tf.nn.dropout(relu6_2, keep_prob=keep_prob)\n        relu_dropout6_3 = tf.nn.dropout(relu6_3, keep_prob=keep_prob)\n        relu_dropout6_4 = tf.nn.dropout(relu6_4, keep_prob=keep_prob)\n\n        '''\n        # original code\n        conv6 = utils.conv2d_basic(pool5, W6, b6)\n        relu6 = tf.nn.relu(conv6, name=\"relu6\")\n        relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob) # dropout is only applied to the fully connected layers; dropping connections zeroes out some node outputs to avoid overfitting\n        '''\n\n        W7 = utils.weight_variable([1, 1, 1024, 1024], name=\"W7\")\n        b7 = utils.bias_variable([1024], name=\"b7\")\n\n        Fc7_1 = utils.conv2d_basic(relu_dropout6_1, W7, b7)\n        Fc7_2 = utils.conv2d_basic(relu_dropout6_2, W7, b7)\n        Fc7_3 = utils.conv2d_basic(relu_dropout6_3, W7, b7)\n        Fc7_4 = utils.conv2d_basic(relu_dropout6_4, W7, b7)\n\n        Bn7_1 = utils.batch_norm_layer(Fc7_1, FLAGS.mode, scope_bn='Bn')\n        Bn7_2 = utils.batch_norm_layer(Fc7_2, FLAGS.mode, scope_bn='Bn')\n        Bn7_3 = utils.batch_norm_layer(Fc7_3, FLAGS.mode, scope_bn='Bn')\n        Bn7_4 = utils.batch_norm_layer(Fc7_4, FLAGS.mode, scope_bn='Bn')\n\n        relu7_1 = tf.nn.relu(Bn7_1, name=\"relu7_1\")\n        relu7_2 = tf.nn.relu(Bn7_2, name=\"relu7_2\")\n        relu7_3 = tf.nn.relu(Bn7_3, name=\"relu7_3\")\n        relu7_4 = tf.nn.relu(Bn7_4, name=\"relu7_4\")\n\n        if FLAGS.debug:\n            utils.add_activation_summary(relu7_1)\n            utils.add_activation_summary(relu7_2)\n            utils.add_activation_summary(relu7_3)\n            utils.add_activation_summary(relu7_4)\n\n        relu_dropout7_1 = tf.nn.dropout(relu7_1, keep_prob=keep_prob)\n        relu_dropout7_2 = tf.nn.dropout(relu7_2, keep_prob=keep_prob)\n        relu_dropout7_3 = tf.nn.dropout(relu7_3, keep_prob=keep_prob)\n        relu_dropout7_4 = tf.nn.dropout(relu7_4, keep_prob=keep_prob)\n\n        '''\n        conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)\n        relu7 = tf.nn.relu(conv7, name=\"relu7\")\n        relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)\n        '''\n\n        W8 = utils.weight_variable([1, 1, 1024, NUM_OF_CLASSESS], name=\"W8\")\n        b8 = utils.bias_variable([NUM_OF_CLASSESS], name=\"b8\")\n\n        Fc8_1 = utils.conv2d_basic(relu_dropout7_1, W8, b8)\n        Fc8_2 = utils.conv2d_basic(relu_dropout7_2, W8, b8)\n        Fc8_3 = utils.conv2d_basic(relu_dropout7_3, W8, b8)\n        Fc8_4 = utils.conv2d_basic(relu_dropout7_4, W8, b8)\n\n        '''\n        relu8_1 = tf.nn.relu(Fc8_1, name=\"relu8_1\")\n        relu8_2 = tf.nn.relu(Fc8_2, name=\"relu8_2\")\n        relu8_3 = tf.nn.relu(Fc8_3, name=\"relu8_3\")\n        relu8_4 = tf.nn.relu(Fc8_4, name=\"relu8_4\")\n\n        relu_dropout8_1 = tf.nn.dropout(relu8_1, keep_prob=keep_prob)\n        relu_dropout8_2 = tf.nn.dropout(relu8_2, keep_prob=keep_prob)\n        relu_dropout8_3 = tf.nn.dropout(relu8_3, keep_prob=keep_prob)\n        relu_dropout8_4 = tf.nn.dropout(relu8_4, keep_prob=keep_prob)\n        '''\n\n        Fc8 = tf.add_n([Fc8_1, Fc8_2, Fc8_3, Fc8_4], name=\"Fc8\") # the Fc8 branches have the same shape but different receptive fields\n\n        '''\n        conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)\n        '''\n\n        shape = tf.shape(image)\n        # print(shape[1], shape[2])\n        resize_Fc8 = tf.image.resize_images(Fc8,\n                                            (shape[1], shape[2])) # tf's built-in resize_images() (bilinear by default) upsamples 8x back to the original 256x256 size\n        softmax = tf.nn.softmax(resize_Fc8) # tf.nn.softmax() turns the forward output into a probability distribution\n        annotation_pred = tf.argmax(softmax, dimension=3, name=\"prediction\")\n    
return tf.expand_dims(annotation_pred, dim=3), resize_Fc8, softmax\n\n\ndef train(loss_val, var_list):\n    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate) # Adam optimization algorithm\n    grads = optimizer.compute_gradients(loss_val, var_list=var_list) # compute the gradients and return the weight updates\n    return optimizer.apply_gradients(grads)\n    # return loss_val\n\n\ndef main(argv=None):\n    # define the regularization variable:\n    regularization = tf.Variable(0, dtype=tf.float32)\n\n    keep_probability = tf.placeholder(tf.float32, name=\"keep_probabilty\") # dropout\n    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name=\"input_image\") # input\n    annotation = tf.placeholder(tf.int32, shape=[None, None, None, 1], name=\"annotation\") # label\n    mean = tf.placeholder(tf.float32, name='mean')\n    pred_annotation, logits, softmax = inference(image, keep_probability, mean) # logits=resize_F8\n\n\n    tf.summary.image(\"input_image\", image, max_outputs=2)\n    tf.summary.image(\"ground_truth\", tf.cast(annotation, tf.uint8), max_outputs=2)\n    tf.summary.image(\"pred_annotation\", tf.cast(pred_annotation, tf.uint8), max_outputs=2)\n\n    loss = tf.reduce_mean(\n        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,\n                                                       # tf.nn.sparse_softmax_cross_entropy_with_logits applies softmax to the logits (resize_F8) internally\n                                                       labels=tf.squeeze(annotation, squeeze_dims=[3]),\n                                                       name=\"entropy\"))\n    tf.summary.scalar(\"entropy\", loss) # train and val share this single loss node\n\n    trainable_var = tf.trainable_variables() # Variables are collected in the collection named tf.GraphKeys.VARIABLES\n\n    if FLAGS.debug:\n        for var in trainable_var:\n            utils.add_to_regularization_and_summary(var) # adds regularization\n\n    train_op = train(loss, trainable_var)\n\n    print(\"Setting up summary op...\")\n\n    print(\"Setting up image reader...\")\n    train_records, valid_records = scene_parsing.read_dataset(FLAGS.data_dir) # converts the input data\n    print(len(train_records))\n    print(len(valid_records))\n\n    print(\"Setting up dataset reader\")\n    image_options = {'resize': True, 'resize_size': IMAGE_RESIZE} # use IMAGE_RESIZE-sized patches as a batch\n\n    if FLAGS.mode == 'train':\n        train_dataset_reader = dataset.BatchDatset(train_records, image_options)\n        validation_dataset_reader = dataset.BatchDatset(valid_records, image_options) # load the validation set even in train mode\n\n    sess = tf.Session()\n    print(\"Setting up Saver...\")\n    saver = tf.train.Saver() # declare the tf.train.Saver() class, used to store the model\n    summary_op = tf.summary.merge_all() # merge all summaries\n    summary_writer_train = tf.summary.FileWriter(FLAGS.logs_dir + '/train', sess.graph)\n    summary_writer_val = tf.summary.FileWriter(FLAGS.logs_dir + '/val') # no need to add the graph again here\n\n    sess.run(tf.global_variables_initializer())\n\n    ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir) # read the checkpoint so training can resume from it next time\n    if ckpt and ckpt.model_checkpoint_path: # these two lines use tf.train.get_checkpoint_state() to find the newest model in the directory via the checkpoint file (which stores all model names)\n        saver.restore(sess, ckpt.model_checkpoint_path)\n        print(\"Model restored...\")\n\n    if FLAGS.mode == \"train\":\n        # mymean = train_dataset_reader._read_images() # force-run this function to pass the mean over\n        # calling ._read_images() on train_dataset_reader requires editing it to compute and return the mean\n        # compute the mean value of the current dataset\n        # mymean = [42.11049008, 65.75782253, 74.11216841] # update this per dataset\n        mymean = [73.9524, 73.9524, 73.9524]\n        for itr in xrange(20000, 25001): # adjust itr so training continues after the previously saved model count\n            train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)\n            feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85, mean: mymean}\n            sess.run(train_op, feed_dict=feed_dict)\n            if itr % 10 == 0:\n                # do not run the loss op elsewhere\n                train_loss, summary_str = sess.run([loss, 
summary_op], feed_dict=feed_dict)\n                summary_writer_train.add_summary(summary_str, itr)\n                print(\"Step: %d, Train_loss:%g\" % (itr, train_loss))\n\n            if itr % 100 == 0: # periodically evaluate the validation set\n                '''\n                valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)\n                valid_loss, summary_str = sess.run([loss, summary_op], feed_dict={image: valid_images,\n                                                                                  annotation: valid_annotations,\n                                                                                  keep_probability: 1.0,\n                                                                                  mean: mymean}) # compute the value of the loss node on the validation batch\n                summary_writer_val.add_summary(summary_str, itr)\n                print(\"%s ---> Validation_loss: %g\" % (datetime.datetime.now(), valid_loss))\n                '''\n                saver.save(sess, FLAGS.logs_dir + \"model.ckpt\", itr)\n\n        # summary_writer_val.close()\n        summary_writer_train.close()\n\n    elif FLAGS.mode == \"visualize\":\n        # mymean = [42.11049008, 65.75782253, 74.11216841]\n        mymean = [73.9524, 73.9524, 73.9524]\n\n        validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)\n        valid_images, valid_annotations = validation_dataset_reader.get_random_batch(FLAGS.batch_size)\n        pred = sess.run(pred_annotation, feed_dict={image: valid_images, annotation: valid_annotations,\n                                                    keep_probability: 1.0, mean: mymean})\n\n        valid_annotations = np.squeeze(valid_annotations, axis=3)\n        pred = np.squeeze(pred, axis=3)\n\n        for itr in range(FLAGS.batch_size):\n            utils.save_image(valid_images[itr].astype(np.uint8), FLAGS.logs_dir, name=\"inp_\" + str(5 + itr))\n            utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name=\"gt_\" + str(5 + itr))\n            utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name=\"pred_\" + str(5 + itr))\n            print(\"Saved image: %d\" % itr)\n\n    elif FLAGS.mode == \"test\":\n        im_2017_list = []\n        im_2017_list2 = []\n        b = []\n        global im_2017 # [5106, 15106, 3]\n        global new_2017 # [8106, 15106, 3]\n        global new_2015\n        for i in range(25):\n            b = new_2017[0:3000, i * 600:i * 600 + 600, 3]\n            b = np.array([np.array([b for i in range(3)])])\n            b = b.transpose(0, 2, 3, 1)\n            im_2017_list.append(b)\n            print(b.shape)\n\n        '''\n        # multi-channel version\n        b = im_2017[0:5106, i * 300 : i * 300 + 300, :]\n        b = np.array([b])\n        # print(b.shape)\n        # b = b.transpose(0, 2, 3, 1)\n        im_2017_list.append(b)\n        # im_2017_list.append(np.array([np.array([im_2017[0:5106, 15000:15106, 3] for i in range(3)])]).transpose(0, 2, 3, 1))\n        #im_2017_list.append(np.array([im_2017[0:5106, 15000:15106, :]])) # .transpose(0, 2, 3, 1))\n        im_2017_list.append(np.array([im_2017[0:5106, 15000:15106, :]]))\n        '''\n\n        im_2017_list.append(\n            np.array([np.array([new_2017[0:3000, 15000:15106, 3] for i in range(3)])]).transpose(0, 2, 3, 1))\n        '''\n        for i in range(50):\n            b = new_2017[5106:8106, i * 300:i * 300 + 300, 3]\n            b = np.array([np.array([b for i in range(3)])])\n            b = b.transpose(0, 2, 3, 1)\n            im_2017_list2.append(b)\n        im_2017_list2.append(\n            np.array([np.array([new_2017[5106:8106, 15000:15106, 3] for i in range(3)])]).transpose(0, 2, 3, 1))\n        '''\n        allImg = []\n        allImg2 = []\n        allImg_soft = []\n        mymean = [73.9524, 73.9524, 73.9524]\n        # mymean = [42.18489547, 36.05229143, 25.13712141]\n\n        for n, im_2017_part in enumerate(im_2017_list):\n            # print(im_2017_part.shape)\n            feed_dict_test = {image: im_2017_part, keep_probability: 1.0, mean: mymean}\n            a = sess.run(pred_annotation, feed_dict=feed_dict_test)\n            a = np.mean(a, axis=(0, 3))\n            allImg.append(a)\n\n        '''\n        # Shift + Tab\n        for n, im_2017_part2 in enumerate(im_2017_list2):\n            # print(im_2017_part.shape)\n            feed_dict_test = {image: im_2017_part2, keep_probability: 1.0, mean: mymean}\n            a = sess.run(pred_annotation, feed_dict=feed_dict_test)\n            
a = np.mean(a, axis=(0, 3))\n            allImg2.append(a)\n        '''\n\n        # # apply crf:\n        # soft = sess.run(softmax, feed_dict=feed_dict_test)\n        # # running sess.run(softmax, feed_dict=feed_dict_test) yields the softmax, i.e. the forward pass output converted into a probability distribution\n        # soft = np.mean(soft, axis=0).transpose(2, 0, 1)\n        # # soft = soft.transpose(2, 0, 1)\n        #\n        # im_2017_mean = np.mean(im_2017_list[n], axis=0)\n        #\n        # # print (im_2017_mean.shape) #(5106, 300, 3)\n        # c = crf.crf(im_2017_mean, soft)\n        # # print (c.shape) #(5106, 300)\n        # allImg_soft.append(c) # keep the whole soft map\n        # Crf = np.concatenate(tuple(allImg_soft), axis=1) # axis = 1 concatenates along the second dimension\n        # tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/crf_deep_1101.tif', Crf)\n\n        res1 = np.concatenate(tuple(allImg), axis=1).astype(np.uint8)\n        # res2 = np.concatenate(tuple(allImg2), axis=1).astype(np.uint8)\n        # result = np.concatenate((res1, res2), axis=0)\n        tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/deep_1024_1103.tif', res1)\n\n\ndef resultImgPro(img):\n    after_median = cv2.medianBlur(img, 5)\n    # tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017_after_median.tif', after_median)\n    labeled_img = skimage.measure.label(after_median)\n    res = skimage.measure.regionprops(labeled_img)\n    j = 0\n    for i in range(len(res)):\n        if res[i].area > 20000:\n            # print ('del')\n            j = j + 1\n            # print(res[i])\n    print(len(res))\n    print(j)\n    after_del = misc.remove_big_objects(labeled_img, 20000).astype(np.uint8)\n    after_del = cv2.medianBlur(after_del, 9)\n    for i in range(after_del.shape[0]):\n        for j in range(after_del.shape[1]):\n            if after_del[i][j] > 0:\n                after_del[i][j] = 1\n                # print(after_del[i][j])\n    tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/de_result_1012.tif', after_del)\n\n\ndef open_and_close(img):\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 11))\n    # closing operation\n    img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n    # opening operation\n    img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n    tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/2017/zuihouyibosaocaozuo.tif', img)\n\n\ndef change_uint8(img):\n    IMG = img\n    result = IMG.astype(np.uint8)\n    tiff.imsave('/home/lenovo/2Tdisk/Wkyao/_/20177.tif', result)\n\n\ndef im2017_de_im2015():\n    after2017 = tiff.imread('/home/lenovo/2Tdisk/Wkyao/_/2017/2017_1012.tif')\n    after2015 = tiff.imread('/home/lenovo/2Tdisk/Wkyao/_/2017/2015_1012.tif')\n    result = after2017.copy()\n    for n1 in range(after2015.shape[0]):\n        for n2 in range(after2015.shape[1]):\n            if after2015[n1][n2] == 1 and result[n1][n2] == 1:\n                result[n1][n2] = 0\n    resultImgPro(result)\n    print(after2017.shape, after2015.shape)\n\n\nif __name__ == \"__main__\":\n    tf.app.run()\n    # resultImgPro()\n    # change_uint8()\n    # im2017_de_im2015()\n    # open_and_close(im_result)\n","sub_path":"DeepLab_512.py","file_name":"DeepLab_512.py","file_ext":"py","file_size_in_byte":21967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"356967302","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport PIL.Image # plain 'import PIL' does not load the Image submodule used below\nimport copy\n\nvgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)\n\n\ndef extract_box(box):\n    return box[:, 0], box[:, 1], box[:, 2], box[:, 3], box[:, 4], box[:, 5]\n\n\ndef extract_tops(det_conf, det_xmax, det_xmin, det_ymax, det_ymin, top_indices):\n    top_conf = det_conf[top_indices]\n    top_xmin = det_xmin[top_indices]\n    top_ymin = det_ymin[top_indices]\n    top_xmax = det_xmax[top_indices]\n    top_ymax = det_ymax[top_indices]\n    return top_conf, top_xmax, top_xmin, top_ymax, top_ymin\n\n\ndef scale_coords_to_image(img, top_xmax, top_xmin, top_ymax, top_ymin):\n    xmin = 
int(round(top_xmin * img.shape[1]))\n ymin = int(round(top_ymin * img.shape[0]))\n xmax = int(round(top_xmax * img.shape[1]))\n ymax = int(round(top_ymax * img.shape[0]))\n return xmax, xmin, ymax, ymin\n\n\ndef save_boxes(img, box, label_classes, top_indices, top_label_indices, must_be_in_classes):\n _, det_conf, det_xmin, det_ymin, det_xmax, det_ymax = extract_box(box)\n top_conf, top_xmax, top_xmin, top_ymax, top_ymin = extract_tops(det_conf, det_xmax, det_xmin, det_ymax, det_ymin,\n top_indices)\n annotations = []\n for i in range(top_conf.shape[0]):\n label = int(top_label_indices[i])\n label_name = label_classes[label - 1]\n if label_name not in must_be_in_classes:\n continue\n xmax, xmin, ymax, ymin = scale_coords_to_image(img, top_xmax[i], top_xmin[i], top_ymax[i], top_ymin[i])\n score = top_conf[i]\n annotations.append({\"class\": label_name, \"y\": ymin, \"x\": xmin,\n \"height\": ymax - ymin + 1, \"width\": xmax - xmin + 1, \"score\": score})\n\n return annotations\n\n\ndef render_boxes(img, box, label_classes, top_indices, top_label_indices, must_be_in_classes):\n _, det_conf, det_xmin, det_ymin, det_xmax, det_ymax = extract_box(box)\n top_conf, top_xmax, top_xmin, top_ymax, top_ymin = extract_tops(det_conf, det_xmax, det_xmin, det_ymax, det_ymin, top_indices)\n\n colors = plt.cm.hsv(np.linspace(0, 1, len(label_classes) + 1)).tolist()\n #img = img[:, :, ::-1] + vgg_mean\n plt.imshow(img / 255.)\n currentAxis = plt.gca()\n for i in range(top_conf.shape[0]):\n xmax, xmin, ymax, ymin = scale_coords_to_image(img, top_xmax[i], top_xmin[i], top_ymax[i], top_ymin[i])\n score = top_conf[i]\n label = int(top_label_indices[i])\n label_name = label_classes[label - 1]\n if label_name not in must_be_in_classes:\n continue\n display_txt = '{:0.2f}, {}, {}'.format(score, label_name, i)\n coords = (xmin, ymin), xmax - xmin + 1, ymax - ymin + 1\n color = colors[label]\n currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor': color, 'alpha': 0.5})\n\n plt.show()\n\ndef wrap_entry(fname, box, top_indices, top_label_indices):\n return {\"filename\": fname, \"box\": box, \"top_indices\": top_indices, \"top_label_indices\": top_label_indices}\n\ndef unwrap_entry(entry):\n img = np.asarray(PIL.Image.open(entry['filename']))\n box = entry['box']\n top_indices = entry['top_indices']\n top_label_indices = entry['top_label_indices']\n return box, img, top_indices, top_label_indices\n\ndef get_top_confidence(filenames, boxes, confidence_threshold):\n top_conf = []\n for i, fname in enumerate(filenames):\n # Parse the outputs.\n box = boxes[i]\n det_label, det_conf, _, _, _, _ = extract_box(box)\n\n # Get detections with confidence higher than what user selected\n top_indices = [i for i, conf in enumerate(det_conf) if conf >= confidence_threshold]\n\n top_label_indices = det_label[top_indices].tolist()\n top_conf.append(wrap_entry(fname, box, top_indices, top_label_indices))\n\n return top_conf\n\n\ndef get_top_n(filenames, boxes, num_per_class):\n top_n = []\n for i, fname in enumerate(filenames):\n # Parse the outputs.\n box = boxes[i]\n det_label, _, _, _, _, _ = extract_box(box)\n\n # Get detections of top N boxes per class\n all_labels = det_label.tolist()\n\n top_all_labels = []\n for label_index in np.unique(all_labels):\n indexes_of_label = np.where(all_labels == label_index)[0][:num_per_class]\n top_all_labels.append(indexes_of_label)\n\n top_indices = [item for sublist in top_all_labels for item in 
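For reference, the box layout these helpers assume (per `extract_box`) is `[label, confidence, xmin, ymin, xmax, ymax]` with relative coordinates. A small sketch of the threshold-then-scale flow from `get_top_confidence` and `scale_coords_to_image`, with made-up numbers:

```python
import numpy as np

box = np.array([
    [1, 0.92, 0.10, 0.20, 0.40, 0.60],   # confident detection
    [2, 0.15, 0.50, 0.50, 0.70, 0.90],   # below threshold
])
det_label, det_conf = box[:, 0], box[:, 1]

# keep detections whose confidence clears the user threshold
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.5]
top_label_indices = det_label[top_indices].tolist()
assert top_indices == [0] and top_label_indices == [1.0]

# relative -> pixel coordinates, exactly as scale_coords_to_image does
h, w = 480, 640
xmin = int(round(box[0, 2] * w))   # 64
ymin = int(round(box[0, 3] * h))   # 96
```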
sublist]\n\n top_label_indices = det_label[top_indices].tolist()\n top_n.append(wrap_entry(fname, box, top_indices, top_label_indices))\n return top_n\n\n'''\n Arguments:\n discrim_func: A function that takes in filenames, box-results from model, and a variable to discriminate on\n current impls: get_top_n, get_top_confidence\n'''\ndef save_bboxes(discrim_func, filenames, boxes, discriminator_variable, label_classes, must_be_in_classes=None):\n if not must_be_in_classes:\n must_be_in_classes = label_classes\n top_by_discriminator = discrim_func(filenames, boxes, discriminator_variable)\n img_annos = []\n for entry in top_by_discriminator:\n box, img, top_indices, top_label_indices = unwrap_entry(entry)\n annos = save_boxes(img, box, label_classes, top_indices, top_label_indices, must_be_in_classes)\n img_annos.append({\"class\": \"image\", \"filename\": entry['filename'], \"annotations\": annos})\n return img_annos\n\n'''\n Arguments:\n discrim_func: A function that takes in filenames, box-results from model, and a variable to discriminate on\n current impls: get_top_n, get_top_confidence\n'''\ndef render(discrim_func, filenames, boxes, discriminator_variable, label_classes, must_be_in_classes=None):\n if not must_be_in_classes:\n must_be_in_classes = label_classes\n top_by_discriminator = discrim_func(filenames, boxes, discriminator_variable)\n for entry in top_by_discriminator:\n box, img, top_indices, top_label_indices = unwrap_entry(entry)\n render_boxes(img, box, label_classes, top_indices, top_label_indices, must_be_in_classes)\n\n\ndef save_top_n(filenames, boxes, num_per_class, label_classes, must_be_in_classes=None):\n return save_bboxes(get_top_n, filenames, boxes, num_per_class, label_classes, must_be_in_classes)\n\n\ndef save_above_threshold(filenames, boxes, confidence_threshold, label_classes, must_be_in_classes=None):\n return save_bboxes(get_top_confidence, filenames, boxes, confidence_threshold, label_classes, must_be_in_classes)\n\n\ndef render_top_n(filenames, boxes, num_per_class, label_classes, must_be_in_classes=None):\n render(get_top_n, filenames, boxes, num_per_class, label_classes, must_be_in_classes)\n\n\ndef render_above_threshold(filenames, boxes, confidence_threshold, label_classes, must_be_in_classes=None):\n render(get_top_confidence, filenames, boxes, confidence_threshold, label_classes, must_be_in_classes)\n\n\ndef render_sloth(img_anno, classes, colors=None):\n if not colors:\n colors = plt.cm.hsv(np.linspace(0, 1, len(classes) + 1)).tolist()\n img = np.asarray(PIL.Image.open(img_anno['filename']))\n plt.imshow(img / 255.)\n currentAxis = plt.gca()\n for i, box in enumerate(img_anno['annotations']):\n display_txt = '{:0.2f}, {}, {}'.format(box['score'], box['class'], i)\n x, y = box[\"x\"], box[\"y\"]\n coords = (x, y), box[\"width\"], box[\"height\"]\n color = colors[classes.index(box['class'])]\n currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))\n currentAxis.text(x, y, display_txt, bbox={'facecolor': color, 'alpha': 0.5})\n\n plt.show()\n\ndef render_sloth_annotations(img_annotations, classes):\n colors = plt.cm.hsv(np.linspace(0, 1, len(classes) + 1)).tolist()\n for img_anno in img_annotations:\n render_sloth(img_anno, classes, colors)\n\n\n","sub_path":"visual_util.py","file_name":"visual_util.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"553052139","text":"# @Author: JY\r\n# @Date: 
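The records built by `save_boxes`/`save_bboxes` and consumed by `render_sloth` follow the sloth annotation convention: one dict per image, with a nested list of box dicts. A minimal example of the shape (filename and values are illustrative):

```python
import json

img_anno = {
    "class": "image",
    "filename": "example.jpg",           # hypothetical path
    "annotations": [
        {"class": "car", "x": 64, "y": 96,
         "width": 193, "height": 193, "score": 0.92},
    ],
}
# save_bboxes returns a list of such records, one per input image
print(json.dumps([img_anno], indent=2))
```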
2019-01-30T11:40:27+09:00\r\n# @Filename: e_13.py\r\n# @Last modified by: JY\r\n# @Last modified time: 2019-01-30T15:06:07+09:00\r\n# @Copyright: JeeY\r\n\r\nfilename00 = 'd:/Programming/Language_Model/data/korean_wiki/korean_wiki_result_words_01_after_reducing.txt'\r\n\r\n\r\nwith open(filename00, 'r', encoding='utf-8') as f:\r\n while True:\r\n line = f.readline()\r\n if not line: break\r\n print(line)\r\n\r\n\r\n\r\n## endl\r\n","sub_path":"sourcefiles/parser/e_13.py","file_name":"e_13.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"136079131","text":"\n# \"Stopwatch: The Game\"\n# Author: Sean Guo\n# Date: 2015/3/9\n# http://www.codeskulptor.org/#user39_QJ94HexE36smqAa.py\n\n\nimport simplegui\n# define global variables\ncounter = 0\nx = 0\ny = 0\nrunning = False\n\n# define helper function format that converts time\n# in tenths of seconds into formatted string A:BC.D\ndef format(t):\n minutes = counter / 600\n seconds = counter % 600 / 10\n milliseconds = counter % 10\n return str(minutes) + \":\" + str(seconds) + \".\" + str(milliseconds)\n \n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\ndef click_start():\n global running\n if running == False:\n timer.start()\n running = True\n else:\n pass\n \ndef click_stop():\n global x\n global y\n global running\n if running == True:\n timer.stop()\n running = False\n y += 1\n if counter % 10 == 0:\n x += 1\n else:\n # running = False\n pass # nothing to do\n \ndef click_reset():\n global counter\n global x\n global y\n global running\n timer.stop()\n running = False\n counter = 0\n x = 0\n y = 0\n\n# define event handler for timer with 0.1 sec interval\ndef timer_handler():\n global counter\n counter += 1\n\n# define draw handler\ndef draw(canvas):\n canvas.draw_text(format(counter), [50,120], 48, \"Red\")\n canvas.draw_text(str(x) + \"/\" + str(y), [150,20], 20, \"Red\")\n \n# create frame\nframe = simplegui.create_frame(\"StopWatch\", 200, 200)\n# register event handlers\nframe.add_button(\"Start\", click_start, 100)\nframe.add_button(\"Stop\", click_stop, 100)\nframe.add_button(\"Reset\", click_reset, 100)\nframe.set_draw_handler(draw)\n\n# create timer, interval = 0.1s;\ntimer = simplegui.create_timer(100, timer_handler)\n\n# start frame\nframe.start()\n\n# Please remember to review the grading rubric\n\n","sub_path":"Coursera/StopWathc.py","file_name":"StopWathc.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"518438295","text":"import sys\nimport time\nimport urllib2\nfrom livebotchart import LiveBotChart\nfrom botstrategy import BotStrategy\nfrom botcandlestick import BotCandlestick\n\ndef main(argv):\n chart = LiveBotChart()\n strategy = BotStrategy(50)\n strategy.candlesticks = chart.preliminaryCandlesticks()\n developingCandlestick = BotCandlestick()\n\n while True:\n try:\n developingCandlestick.tick(chart.getCurrentPrice())\n except urllib2.URLError:\n time.sleep(1)\n developingCandlestick.tick(chart.getCurrentPrice())\n\n if developingCandlestick.isClosed():\n strategy.candlesticks.append(developingCandlestick)\n strategy.evaluatePositions()\n developingCandlestick = BotCandlestick()\n\n time.sleep(1)\n\n\nif __name__ == \"__main__\":\n 
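A note on the stopwatch record above: its `format(t)` helper never uses its argument (it reads the global `counter` instead), shadows the built-in `format`, and prints seconds without zero-padding, so the stated `A:BC.D` layout comes out as e.g. `1:5.3` instead of `1:05.3`. A corrected sketch under a non-shadowing name:

```python
def format_tenths(t):
    # convert a count of tenths of seconds into A:BC.D, zero-padding seconds
    minutes = t // 600
    seconds = (t % 600) // 10
    tenths = t % 10
    return "%d:%02d.%d" % (minutes, seconds, tenths)

assert format_tenths(605) == "1:00.5"
assert format_tenths(53) == "0:05.3"
```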
main(sys.argv[1:])\n","sub_path":"live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"604485646","text":"import collections\nimport cProfile, pstats, io\n\ndef flatten(iterable):\n for element in iterable:\n if isinstance(element, collections.Iterable) and not isinstance(element, (str, bytes)):\n yield from flatten(element)\n else:\n yield element\n\nclass AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\nclass Structure:\n pass\n\ndef profile(fun, num=1):\n\n \"\"\"A decorator that uses CProfile to profile matrix_array function.\"\"\"\n\n def inner(*args, **kwargs):\n pr = cProfile.Profile()\n pr.enable()\n for _ in range(num):\n retval = fun(*args, **kwargs)\n pr.disable()\n s = io.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n return retval\n\n return inner\n\n","sub_path":"elements/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"414353586","text":"from __future__ import division\nimport random\nfrom src.environment import Environment\nfrom src.agents import Agent\nfrom read_input import Data\nfrom itertools import count\nimport numpy as np\nfrom collections import deque\nimport time\nimport torch\nfrom src.utils import plot, dotdict\n\ncargs = dotdict({\n 'run_mode': 'train',\n 'visualize': True,\n 'min_size': 7,\n 'max_size': 7,\n 'n_games': 3,\n 'num_iters': 20000,\n 'n_epochs': 1000000,\n 'n_maps': 1000,\n 'show_screen': True,\n})\n\nargs = [\n dotdict({\n 'optimizer': 'adas',\n 'lr': 1e-4,\n 'exp_rate': 0.0,\n 'gamma': 0.99,\n 'tau': 0.01,\n 'max_grad_norm': 0.3,\n 'discount': 0.6,\n 'num_channels': 64,\n 'batch_size': 256,\n 'replay_memory_size': 100000,\n 'dropout': 0.6,\n 'initial_epsilon': 0.1,\n 'final_epsilon': 1e-4,\n 'dir': './Models/',\n 'load_checkpoint': False,\n 'saved_checkpoint': True\n }),\n \n dotdict({\n 'optimizer': 'adas',\n 'lr': 1e-4,\n 'exp_rate': 0.0,\n 'gamma': 0.99,\n 'tau': 0.01,\n 'max_grad_norm': 0.3,\n 'discount': 0.6,\n 'batch_size': 256,\n 'num_channels': 64,\n 'replay_memory_size': 100000,\n 'dropout': 0.4,\n 'initial_epsilon': 0.1,\n 'final_epsilon': 0.01,\n 'dir': './Models/',\n 'load_checkpoint': False,\n 'saved_checkpoint': True\n })\n]\n\ndef train(): \n data = Data(cargs.min_size, cargs.max_size)\n env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)\n agent = [Agent(env, args[0], 'agent_1'), Agent(env, args[1], 'agent_2')]\n wl_mean, score_mean, l_val_mean =\\\n [[deque(maxlen = 10000), deque(maxlen = 10000)] for _ in range(3)]\n wl, score, l_val = [[deque(maxlen = 1000), deque(maxlen = 1000)] for _ in range(3)]\n lr_super = [args[0].exp_rate, args[1].exp_rate]\n cnt_w, cnt_l = 0, 0\n # agent[0].model.load_state_dict(torch.load(checkpoint_path_1, map_location = agent[0].model.device))\n # agent[1].model.load_state_dict(torch.load(checkpoint_path_2, map_location = agent[1].model.device))\n \n for _ep in range(cargs.n_epochs):\n if _ep % 10 == 9:\n print('Training_epochs: {}'.format(_ep + 1))\n for _game in range(cargs.n_games):\n done = False\n start = time.time()\n for _iter in count():\n if cargs.show_screen:\n env.render()\n \n \"\"\" initialize \"\"\"\n actions, state_vals, log_probs, rewards, soft_state, 
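Quick usage notes for the helpers in the `utils.py` record above: `flatten` recurses into nested iterables but deliberately leaves strings and bytes whole, `AttrDict` aliases attribute access and key access onto the same dict storage, and `profile` wraps a function in cProfile. One caveat: `collections.Iterable` moved to `collections.abc` and was removed in Python 3.10, so the isinstance check needs adjusting on newer interpreters. A small demo using those definitions:

```python
assert list(flatten([1, [2, [3, "ab"]], (4,)])) == [1, 2, 3, "ab", 4]

d = AttrDict(x=1)
d.y = 2                            # attribute write...
assert d["y"] == 2 and d.x == 1    # ...is the same storage as item access

@profile                           # bare decorator form: num defaults to 1
def work():
    return sum(range(10000))

work()                             # prints cProfile stats by cumulative time
```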
\\\n soft_agent_pos, pred_acts, exp_rewards = [[[], []] for i in range(8)]\n \n \"\"\" update by step \"\"\"\n for i in range(env.num_players):\n soft_state[i] = env.get_observation(i)\n soft_agent_pos[i] = env.get_agent_pos(i)\n pred_acts[i], exp_rewards[i] = agent[i].select_action_smart(soft_state[i], soft_agent_pos[i], env)\n\n \"\"\" select action for each agent \"\"\"\n for agent_id in range(env.n_agents):\n for i in range(env.num_players):\n agent_state = env.get_states_for_step(soft_state[i])\n # not change\n agent_step = env.get_agent_for_step(agent_id, soft_agent_pos)\n act, log_p, state_val = 0, 0, 0\n if random.random() < lr_super[i]:\n act, log_p, state_val = agent[i].select_action_by_exp(\n agent_state, agent_step, pred_acts[i][agent_id])\n else:\n act, log_p, state_val = agent[i].select_action(agent_state, agent_step)\n \n soft_state[i] = env.soft_step_(agent_id, soft_state[i], act, soft_agent_pos[i])\n state_vals[i].append(state_val)\n actions[i].append(act)\n log_probs[i].append(log_p)\n # actions[1] = [np.random.randint(0, env.n_actions - 1) for _ in range(env.n_agents)]\n # actions[1] = [0] * env.n_agents\n # actions[1] = pred_acts[1]\n next_state, final_reward, done, _ = env.step(actions[0], actions[1], cargs.show_screen)\n for i in range(env.n_agents):\n rewards[0].append(final_reward)\n rewards[1].append(- final_reward)\n for j in range(env.num_players):\n if pred_acts[j][i] == actions[j][i]:\n reward = exp_rewards[j][i]\n beta = 0.9\n rewards[j][i] = rewards[j][i] * (1 - beta) + beta * reward\n agent[j].model.store(log_probs[j][i], state_vals[j][i], rewards[j][i])\n if done:\n score[0].append(env.players[0].total_score)\n score[1].append(env.players[1].total_score)\n if env.players[0].total_score > env.players[1].total_score:\n cnt_w += 1\n else:\n cnt_l += 1\n break\n agent[0].learn()\n agent[1].learn()\n end = time.time()\n if _ep > 3:\n l_val[0].append(agent[0].value_loss)\n l_val[1].append(agent[1].value_loss)\n wl[0].append(cnt_w)\n wl[1].append(cnt_l)\n for i in range(2):\n wl_mean[i].append(np.mean(wl[i]))\n score_mean[i].append(np.mean(score[i]))\n l_val_mean[i].append(np.mean(l_val[i]))\n \n env.soft_reset()\n if _ep % 50 == 49:\n if cargs.visualize:\n plot(wl_mean, vtype = 'Win')\n plot(score_mean, vtype = 'Score')\n plot(l_val_mean, vtype = 'Loss_Value')\n print(\"Time: {0: >#.3f}s\". 
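The action-selection loop above mixes an "expert" policy into training: with probability `lr_super[i]` (seeded from `args[i].exp_rate`, which defaults to 0.0, so the branch is off unless configured) it follows the action suggested by `select_action_smart`, otherwise it samples from the learned policy. The rule in isolation, with illustrative names:

```python
import random

def pick_action(policy_action, expert_action, exp_rate):
    # follow the expert suggestion with probability exp_rate,
    # otherwise trust the learned policy
    if random.random() < exp_rate:
        return expert_action
    return policy_action

random.seed(0)
print(pick_action("policy", "expert", exp_rate=0.5))
```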
format(1000*(end - start)))\n if args[0].saved_checkpoint:\n agent[0].save_models()\n # torch.save(agent[0].model.state_dict(), checkpoint_path_1)\n if args[1].saved_checkpoint:\n agent[1].save_models()\n # torch.save(agent[1].model.state_dict(), checkpoint_path_2)\n # print('Completed episodes')\n # lr_super *= 0.999\n env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)\n \ndef test(): \n data = Data(cargs.min_size, cargs.max_size)\n env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)\n agent = [Agent(env, args[0], 'agent_1'), Agent(env, args[1], 'agent_2')]\n wl_mean, score_mean = [[deque(maxlen = 10000), deque(maxlen = 10000)] for _ in range(2)]\n wl, score = [[deque(maxlen = 1000), deque(maxlen = 1000)] for _ in range(2)]\n cnt_w, cnt_l = 0, 0\n # agent[0].model.load_state_dict(torch.load(checkpoint_path_1, map_location = agent[0].model.device))\n # agent[1].model.load_state_dict(torch.load(checkpoint_path_2, map_location = agent[1].model.device))\n \n for _ep in range(cargs.n_epochs):\n if _ep % 10 == 9:\n print('Testing_epochs: {}'.format(_ep + 1))\n done = False\n start = time.time()\n for _iter in count():\n if cargs.show_screen:\n env.render()\n \n \"\"\" initialize \"\"\"\n actions, soft_state, soft_agent_pos = [[[], []] for i in range(3)]\n \n \"\"\" update by step \"\"\"\n for i in range(env.num_players):\n soft_state[i] = env.get_observation(i)\n soft_agent_pos[i] = env.get_agent_pos(i)\n \n \"\"\" select action for each agent \"\"\"\n for agent_id in range(env.n_agents):\n for i in range(env.num_players):\n agent_state = env.get_states_for_step(soft_state[i])\n agent_step = env.get_agent_for_step(agent_id, soft_agent_pos[i])\n act, log_p, state_val = agent[i].select_action(agent_state, agent_step)\n \n soft_state[i] = env.soft_step_(agent_id, soft_state[i], act, soft_agent_pos[i])\n actions[i].append(act)\n # actions[1] = [np.random.randint(0, env.n_actions - 1) for _ in range(env.n_agents)]\n # actions[1] = [0] * env.n_agents\n # actions[1] = pred_acts[1]\n next_state, final_reward, done, _ = env.step(actions[0], actions[1], cargs.show_screen)\n if done:\n score[0].append(env.players[0].total_score)\n score[1].append(env.players[1].total_score)\n if env.players[0].total_score > env.players[1].total_score:\n cnt_w += 1\n else:\n cnt_l += 1\n break\n \n end = time.time()\n \n wl[0].append(cnt_w)\n wl[1].append(cnt_l)\n for i in range(2):\n wl_mean[i].append(np.mean(wl[i]))\n score_mean[i].append(np.mean(score[i]))\n \n if _ep % 50 == 49:\n plot(wl_mean, vtype = 'Win')\n plot(score_mean, vtype = 'Score')\n print(\"Time: {0: >#.3f}s\". 
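Two small observations on the training loop above. First, the reward shaping: whenever an agent's chosen action matches the heuristic suggestion, the environment reward is blended with the heuristic's expected reward at `beta = 0.9`. Second, the elapsed-time print multiplies `end - start` by 1000 but keeps the `s` suffix, so it reports milliseconds labelled as seconds. The blend, isolated (the function name is illustrative):

```python
def blend_reward(env_reward, expected_reward, matched, beta=0.9):
    # same update as rewards[j][i] = rewards[j][i]*(1-beta) + beta*reward
    if matched:
        return env_reward * (1 - beta) + beta * expected_reward
    return env_reward

assert abs(blend_reward(1.0, 0.5, matched=True) - 0.55) < 1e-9
assert blend_reward(1.0, 0.5, matched=False) == 1.0
```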
format(1000*(end - start)))\n env = Environment(data.get_random_map(), cargs.show_screen, cargs.max_size)\n\n\"\"\"\nCreated on Fri Nov 27 16:00:47 2020\n@author: hien\n\"\"\"\nif __name__ == \"__main__\":\n # lr_super *= 0.999\n # lr_super *= 0.999\n if cargs.run_mode == \"train\":\n train()\n if cargs.run_mode == \"test\":\n test()","sub_path":"two_players_training.py","file_name":"two_players_training.py","file_ext":"py","file_size_in_byte":9957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"614280120","text":"\"\"\"电池包生产企业(租户)用例\"\"\"\nimport json\nimport os\nimport time\nimport unittest\nfrom parameterized import parameterized\nfrom selenium.common.exceptions import StaleElementReferenceException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nimport config\nfrom Page.uniform_entry_page import UniformEntryPage\nfrom Util.DriverUtil import DriverUtils, get_tips_message, get_screenshot_page\nfrom config import BASE_DIR\n\n\ndef battery_search_data():\n \"\"\"供应商名称模糊搜索\"\"\"\n battery_accurate_name_list = list()\n with open(config.BASE_DIR + \"/data/operator_data.json\", encoding=\"utf-8\") as f:\n data = json.load(f)\n data_list = data.get('test_battery_pack_management_search_SupplierName')\n for item in data_list:\n battery_accurate_name_list.append((item.get('suName'), item.get('is_success'), item.get('expect')))\n print(battery_accurate_name_list)\n return battery_accurate_name_list\n\n\ndef battery_newly_item():\n \"\"\"新增搜索数据参数化方法\"\"\"\n op_data_list = list()\n with open(config.BASE_DIR + \"/data/operator_data.json\", encoding='utf-8') as f:\n data = json.load(f)\n data_list = data.get('test_battery_pack_manufacturer')\n for item in data_list:\n # 列表添加获取的每一条数据\n op_data_list.append((item.get('code'),\n item.get('typeName'),\n item.get('unCode'),\n item.get('suName'),\n item.get('suProduct'),\n item.get('status'),\n item.get('taxStatus'),\n item.get('legal'),\n item.get('contact'),\n item.get('capital'),\n item.get('number'),\n item.get('zipCode'),\n item.get('imageUrl'),\n item.get('scope'),\n item.get('address'),\n item.get('note'),\n item.get('is_success'),\n item.get('expect')))\n print(op_data_list)\n return op_data_list\n\n\ndef battery_search_legal():\n \"\"\"法人代表搜索\"\"\"\n battery_accurate_legal_list = list()\n with open(config.BASE_DIR + \"/data/operator_data.json\", encoding=\"utf-8\") as f:\n data = json.load(f)\n data_list = data.get('test_battery_pack_management_search_legal')\n for item in data_list:\n battery_accurate_legal_list.append((item.get('legal'), item.get('is_success'), item.get('expect')))\n print(battery_accurate_legal_list)\n return battery_accurate_legal_list\n\n\nclass TestBatteryPackManagement(unittest.TestCase):\n driver = None\n\n @classmethod\n def setUpClass(cls) -> None:\n cls.driver = DriverUtils.get_driver_file()\n config.init_log_config() # 日志\n cls.uniform_entry_page = UniformEntryPage(cls.driver) # 业务执行示例对象\n\n @classmethod\n def tearDownClass(cls) -> None:\n time.sleep(3)\n DriverUtils.quit_driver()\n\n def setUp(self) -> None:\n self.driver.get(\"http://10.100.81.181/#/login\")\n # self.driver.get(\"http://10.100.81.154/#/login\")\n # self.driver.get(\"http://10.100.81.154/#/homepage\")\n self.uniform_entry_page.get_login_proxy().login(\"18701397590\", \"66666666\") # 登录\n self.uniform_entry_page.get_home_proxy().Supplier_management() # 供应商管理\n\n @parameterized.expand(battery_newly_item())\n def 
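The `battery_*` loader functions above all follow the same data-driven pattern: parse a JSON fixture, flatten each case into a tuple, and hand the list to `@parameterized.expand` so every case becomes its own test method. A self-contained miniature, with the fixture inlined instead of read from `operator_data.json`:

```python
import json
import unittest
from parameterized import parameterized

FIXTURE = '''{"cases": [
    {"suName": "ACME", "is_success": true,  "expect": "ACME"},
    {"suName": "",     "is_success": false, "expect": "no data"}
]}'''

def load_cases():
    return [(c["suName"], c["is_success"], c["expect"])
            for c in json.loads(FIXTURE)["cases"]]

class SupplierSearchTest(unittest.TestCase):
    @parameterized.expand(load_cases())
    def test_search(self, su_name, is_success, expect):
        # one generated test per fixture row
        self.assertIsInstance(expect, str)
```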
test_battery_management_newly_increased(self, code, typeName, unCode, suName, suProduct, status, taxStatus,\n legal, contact, capital, number, zipCode, filePath, scope, address,\n note, is_success, expect):\n \"\"\"\n 电池包生产企业---新增\n :param code: 企业备案代码\n :param typeName: 供应商类型\n :param unCode: 统一社会信用代码\n :param suName: 供应商名称\n :param suProduct: 供应商产品\n :param status: 供应商状态\n :param taxStatus: 纳税人身份\n :param legal: 法人代表\n :param contact: 联系人\n :param capital: 注册资金\n :param number: 联系人电话\n :param zipCode: 邮编\n :param filePath: 营业执照\n :param scope: 经营范围\n :param address: 注册地址\n :param note: 备注\n :param is_success:\n :param expect:\n :return:\n \"\"\"\n self.uniform_entry_page.get_batterypackManagementPage().battery_management_newly_increased(code, typeName, unCode, suName,\n suProduct, status, taxStatus,\n legal, contact, capital,\n number, zipCode, filePath,\n scope, address, note)\n if is_success:\n try:\n code_loc = (By.XPATH, '//*[@class=\"app-main-content\"]/div[2]/div[1]/div[3]/table/tbody/tr/td[3]')\n code_text = self.driver.find_elements(*code_loc)\n code_list = list()\n for i in code_text:\n code_list.append(i.text)\n print(\"统一社会信用代码:{}\".format(code_list))\n try:\n self.assertIn(expect, \"{}\".format([code_list[x] for x in range(len(code_list))]))\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n except AssertionError:\n get_screenshot_page(self.driver, \"battery_pack_management_newly_add_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n except TimeoutError:\n get_screenshot_page(self.driver, \"battery_pack_management_newly_add_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n else:\n try:\n self.assertIn(expect, \"{}\".format(get_tips_message()))\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError:\n get_screenshot_page(self.driver, \"battery_pack_management_newly_add_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n\n def test_battery_export(self):\n \"\"\"导出\"\"\"\n path = BASE_DIR + \"\\File\\供应商信息.xlsx\"\n # 判断路径中是否有重复内容\n if os.path.exists(path):\n # 删除\n os.remove(path)\n # 数据列表暂无数据\n tips = self.uniform_entry_page.get_batterypackManagementPage().get_battery_search_search_tips()\n if tips != \"暂无数据\":\n WebDriverWait(self.driver, 5, poll_frequency=1).until(\n self.uniform_entry_page.get_batterypackManagementPage().battery_export_table())\n # 获取文件名\n filename = os.path.basename(path)\n print(\"文件名称: %s\", filename)\n # 获取文件类型(扩展名)\n fileType = os.path.splitext(path)\n print(\"文件扩展名:%s\", fileType)\n try:\n self.assertIn(\"供应商信息\", filename)\n self.assertIn(\".xlsx\", fileType)\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n except AssertionError:\n get_screenshot_page(self.driver, \"battery_pack_management_export_file_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n else:\n WebDriverWait(self.driver, 5, poll_frequency=1).until(\n self.uniform_entry_page.get_batterypackManagementPage().battery_export_table())\n error_tips = self.uniform_entry_page.get_batterypackManagementPage().get_error_text()\n try:\n self.assertIn(\"当前没有可以导出的数据\", error_tips)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError:\n get_screenshot_page(self.driver, \"battery_pack_management_export_file_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n\n\n else:\n self.uniform_entry_page.get_batterypackManagementPage().battery_export_table() # 导出\n # 获取文件名\n filename = os.path.basename(path)\n print(\"文件名称: %s\", filename)\n # 
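The assertion style above stringifies the collected table column (`"{}".format([...])`) and then substring-matches against it; for exact cell values, asserting membership in the list directly is equivalent and fails with a clearer message. A sketch (the credit codes are made up):

```python
code_list = ["91310000MA1FL0GT5L", "91110000123456789X"]   # hypothetical cells
expect = "91310000MA1FL0GT5L"

assert expect in "{}".format(code_list)   # what the test does today
assert expect in code_list                # direct, and clearer on failure
```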
获取文件类型(扩展名)\n fileType = os.path.splitext(path)\n print(\"文件扩展名:%s\", fileType)\n try:\n self.assertIn(\"供应商信息\", filename)\n self.assertIn(\".xlsx\", fileType)\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n except AssertionError:\n get_screenshot_page(self.driver, \"battery_pack_management_export_file_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n\n @parameterized.expand(battery_search_data())\n def test_battery_search_supplier_name(self, SupplierName, is_success, expect):\n \"\"\"\n 供应商搜索\n :param SupplierName: 供应商名称\n :param is_success:\n :param expect:\n :return:\n \"\"\"\n self.uniform_entry_page.get_batterypackManagementPage().search_battery_Supplier_name(SupplierName)\n if is_success:\n try:\n su_name_loc = (By.XPATH, '//div[@class=\"app-main-content\"]/div[2]/div[1]/div[3]/table/tbody/tr/td[2]')\n try:\n self.assertIn(expect, self.driver.find_element(*su_name_loc).text)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError:\n # 截图\n get_screenshot_page(self.driver, \"battery_pack_management_search_suName_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n except TimeoutError:\n # 截图\n get_screenshot_page(self.driver, \"battery_pack_management_search_suName_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n\n else:\n try:\n self.assertIn(expect, self.uniform_entry_page.get_batterypackManagementPage().get_battery_search_search_tips())\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError:\n # 截图\n get_screenshot_page(self.driver, \"battery_pack_management_search_suName_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n\n @parameterized.expand(battery_search_legal())\n def test_battery_search_battery_legal_representative(self, legal, is_success, expect):\n \"\"\"\n 法人代表搜索\n :param legal: 法人代表\n :param is_success:\n :param expect:\n :return:\n \"\"\"\n self.uniform_entry_page.get_batterypackManagementPage().search_battery_Legal_representative(legal)\n if is_success:\n try:\n legal_text = self.driver.find_elements(*(By.XPATH, '//*[@class=\"app-main-content\"]/div[2]/div[1]/div[3]/table/tbody/tr/td[6]'))\n except StaleElementReferenceException:\n legal_text = self.driver.find_elements(\n *(By.XPATH, '//*[@class=\"app-main-content\"]/div[2]/div[1]/div[3]/table/tbody/tr/td[6]'))\n legal_list = []\n for i in legal_text:\n legal_list.append(i.text)\n print(\"法人代表搜索列表:{}\".format(legal_list))\n try:\n self.assertIn(expect, \"{}\".format([legal_list[x] for x in range(len(legal_list))]))\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError:\n get_screenshot_page(self.driver, \"battery_pack_management_search_legal_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n else:\n try:\n get_no_data = self.driver.find_element(By.XPATH,\n '//*[@id=\"app\"]/div/div[2]/section/div/div[2]/div[1]/div[3]/div/span')\n try:\n self.assertIn(expect, get_no_data.text)\n self.uniform_entry_page.get_home_proxy().quit_login()\n except AssertionError:\n # 截图\n get_screenshot_page(self.driver, \"battery_pack_management_search_legal_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n except TimeoutError:\n # 截图\n get_screenshot_page(self.driver, \"battery_pack_management_search_legal_bug\")\n self.uniform_entry_page.get_home_proxy().quit_login() # 退出\n\n def test_battery_pack_manufacturer_details(self):\n \"\"\"详情\"\"\"\n self.uniform_entry_page.get_batterypackManagementPage().Battery_pack_manufacturer_details()\n 
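The export test above verifies a browser download purely through the filesystem: remove any stale copy before clicking export, then check the basename and extension of the expected path. Note that `os.path.splitext` returns a `(root, ext)` tuple, which is why `assertIn(".xlsx", fileType)` passes. Condensed, with an illustrative path:

```python
import os

path = "downloads/供应商信息.xlsx"        # hypothetical download location
if os.path.exists(path):                  # clear stale copies first
    os.remove(path)
# ... click the export button and wait for the browser to finish ...
root, ext = os.path.splitext(path)
assert os.path.basename(path) == "供应商信息.xlsx"
assert ext == ".xlsx"
```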
self.uniform_entry_page.get_home_proxy().quit_login()\n","sub_path":"scripts/test_battery_pack_management.py","file_name":"test_battery_pack_management.py","file_ext":"py","file_size_in_byte":13827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"490797208","text":"\"\"\"Spinless fermion Haldane model.\nHamiltonian based on: \"Characterization and stability of a fermionic ν=1/3 fractional Chern insulator\".\"\"\"\n\nimport numpy as np\n\nfrom tenpy.models.model import CouplingMPOModel, NearestNeighborModel\nfrom tenpy.tools.params import get_parameter\nfrom tenpy.networks.site import FermionSite\n\n\nclass FermionicHaldaneModel(CouplingMPOModel):\n\n def __init__(self, model_params):\n\n model_params.setdefault('lattice', 'Honeycomb')\n CouplingMPOModel.__init__(self, model_params)\n\n def init_sites(self, model_params):\n\n conserve = get_parameter(model_params, 'conserve', 'N', self.name)\n site = FermionSite(conserve=conserve)\n return site\n\n def init_terms(self, model_params):\n\n t = get_parameter(model_params, 't', -1., self.name, True)\n V = get_parameter(model_params, 'V', 0, self.name, True)\n mu = get_parameter(model_params, 'mu', 0., self.name, True)\n phi_ext = 2*np.pi*get_parameter(model_params, 'phi_ext', 0., self.name)\n\n phi = np.arccos(3*np.sqrt(3/43))\n t2 = (np.sqrt(129)/36) * t * np.exp(1j * phi)\n\n for u in range(len(self.lat.unit_cell)):\n\n self.add_onsite(mu, 0, 'N', category='mu N')\n self.add_onsite(-mu, 0, 'N', category='mu N')\n\n for u1, u2, dx in self.lat.nearest_neighbors:\n\n t_phi = self.coupling_strength_add_ext_flux(t, dx, [0, phi_ext])\n self.add_coupling(t_phi, u1, 'Cd', u2, 'C', dx, 'JW', True, category='t Cd_i C_j')\n self.add_coupling(np.conj(t_phi), u2, 'Cd', u1, 'C', -dx, 'JW', True, category='t Cd_i C_j h.c.') # h.c.\n self.add_coupling(V, u1, 'N', u2, 'N', dx, category='V N_i N_j')\n\n for u1, u2, dx in [(0, 0, np.array([-1, 1])), (0, 0, np.array([1, 0])), (0, 0, np.array([0, -1])),\n (1, 1, np.array([0, 1])), (1, 1, np.array([1, -1])), (1, 1, np.array([-1, 0]))]:\n\n t2_phi = self.coupling_strength_add_ext_flux(t2, dx, [0, phi_ext])\n self.add_coupling(t2_phi, u1, 'Cd', u2, 'C', dx, 'JW', True, category='t2 Cd_i C_j')\n self.add_coupling(np.conj(t2_phi), u2, 'Cd', u1, 'C', -dx, 'JW', True, category='t2 Cd_i C_j h.c.') # h.c.\n\n\nclass FermionicHaldaneChain(FermionicHaldaneModel, NearestNeighborModel):\n\n def __init__(self, model_params):\n model_params.setdefault('lattice', \"Chain\")\n CouplingMPOModel.__init__(self, model_params)\n","sub_path":"code/models/old/fermions_haldane.py","file_name":"fermions_haldane.py","file_ext":"py","file_size_in_byte":2446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"610727401","text":"import os, re\nimport pickle\nimport tensorflow as tf\nimport numpy as np\nimport cv2\n\nimport tensorflow.contrib.layers as layers\nfrom tensorflow.contrib.framework import arg_scope\n\nimport libs.cylib as cylib\nimport train_helper\nimport losses\nimport eval_helper\n#import datasets.reader_rgb as reader\nimport datasets.reader as reader\nfrom datasets.cityscapes.cityscapes import CityscapesDataset\n\nFLAGS = tf.app.flags.FLAGS\n\n# RGB\nDATA_MEAN = [75.2051479, 85.01498926, 75.08929598]\nDATA_STD = [46.89434904, 47.63335775, 46.47197535]\n# TODO NORMALIZE DEPTH STD\nDEPTH_MEAN = 37.79630544\nDEPTH_STD = 29.21617326\n\n#DATA_STD = [103.939, 116.779, 123.68]\n#DATA_MEAN = [103.939, 116.779, 
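Two notes on the `FermionicHaldaneModel` record above. First, in `init_terms` both `add_onsite` calls target unit-cell site 0 with `+mu` and `-mu`, so as written they cancel; a staggered honeycomb potential presumably intends the second call on site 1. Second, a hypothetical instantiation sketch, assuming the TeNPy version this was written against (the `get_parameter` era); lattice size, boundary conditions and couplings here are illustrative, not from the source:

```python
model_params = {
    'lattice': 'Honeycomb',   # resolved by CouplingMPOModel
    'Lx': 2, 'Ly': 3,
    'bc_MPS': 'infinite',
    'conserve': 'N',
    't': -1., 'V': 0.1, 'mu': 0., 'phi_ext': 0.,
}
M = FermionicHaldaneModel(model_params)
print(M.lat.N_sites)          # 2 * Lx * Ly sites on the honeycomb
```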
123.68]\n\n#MEAN_BGR = [75.08929598, 85.01498926, 75.2051479]\n\nmodel_depth = 121\nimagenet_init = True\n#imagenet_init = False\ninit_dir = '/home/kivan/datasets/pretrained/dense_net/'\napply_jitter = True\n#apply_jitter = False\npool_func = layers.avg_pool2d\n#pool_func = layers.max_pool2d\n\ntrain_step_iter = 0\n\nweight_decay = 1e-4\n#init_func = layers.variance_scaling_initializer(mode='FAN_OUT')\ninit_func = layers.variance_scaling_initializer()\n\nblock_sizes = [6,12,24,16]\n#block_sizes = [6,12,24,16,8]\ncontext_size = 512\n#imagenet_init = False\n#block_sizes = [3,5,6,6,6]\n#context_size = 256\ngrowth = 32\ncompression = 0.5\ngrowth_up = 32\n#up_sizes = [4,4,8,8]\ngrowth_up = 64\nup_sizes = [4,4,6,6]\n#up_sizes = [2,2,4,4]\n#up_sizes = [3,3,4,4]\n\n\nkm = 256\n#km = 512\n# works the same as 256\n#km = 128\n#km = 512\n\nuse_dropout = False\nkeep_prob = 0.8\n\nfused_batch_norm = True\ndata_format = 'NCHW'\nmaps_dim = 1\nheight_dim = 2\n\n#fused_batch_norm = False\n#data_format = 'NHWC'\n#maps_dim = 3\n#height_dim = 1\n\n\nbn_params = {\n # Decay for the moving averages.\n 'decay': 0.9,\n 'center': True,\n 'scale': True,\n # epsilon to prevent 0s in variance.\n 'epsilon': 1e-5,\n # None to force the updates\n 'updates_collections': None,\n # TODO\n 'fused': fused_batch_norm,\n 'data_format': data_format,\n 'is_training': True\n}\n\n\ndef evaluate(name, sess, epoch_num, run_ops, dataset, data):\n loss_val, accuracy, iou, recall, precision = eval_helper.evaluate_segmentation(\n sess, epoch_num, run_ops, dataset.num_examples() // FLAGS.batch_size_valid)\n is_best = False\n if iou > data['best_iou'][0]:\n is_best = True\n data['best_iou'] = [iou, epoch_num]\n data['iou'] += [iou]\n data['acc'] += [accuracy]\n data['loss'] += [loss_val]\n return is_best\n\n\ndef start_epoch(train_data):\n global train_loss_arr, train_conf_mat\n train_conf_mat = np.ascontiguousarray(\n np.zeros((FLAGS.num_classes, FLAGS.num_classes), dtype=np.uint64))\n train_loss_arr = []\n train_data['lr'].append(lr.eval())\n\n\ndef end_epoch(train_data):\n pixacc, iou, _, _, _ = eval_helper.compute_errors(\n train_conf_mat, 'Train', CityscapesDataset.CLASS_INFO)\n train_data['iou'].append(iou)\n train_data['acc'].append(pixacc)\n train_loss_val = np.mean(train_loss_arr)\n train_data['loss'].append(train_loss_val)\n\n\ndef update_stats(ret_val):\n global train_loss_arr\n loss_val = ret_val[0]\n yp = ret_val[1]\n yt = ret_val[2]\n train_loss_arr.append(loss_val)\n yp = yp.argmax(3).astype(np.int32)\n cylib.collect_confusion_matrix(yp.reshape(-1), yt.reshape(-1), train_conf_mat)\n\n\ndef plot_results(train_data, valid_data):\n eval_helper.plot_training_progress(os.path.join(FLAGS.train_dir, 'stats'),\n train_data, valid_data)\n\n\ndef print_results(train_data, valid_data):\n print('\\nBest train IOU = %.2f' % max(train_data['iou']))\n print('Best validation IOU = %.2f (epoch %d)\\n' % tuple(valid_data['best_iou']))\n\n\ndef init_eval_data():\n train_data = {}\n valid_data = {}\n train_data['lr'] = []\n train_data['loss'] = []\n train_data['iou'] = []\n train_data['acc'] = []\n train_data['best_iou'] = [0, 0]\n valid_data['best_iou'] = [0, 0]\n valid_data['loss'] = []\n valid_data['iou'] = []\n valid_data['acc'] = []\n return train_data, valid_data\n\n\ndef normalize_input(bgr, depth):\n with tf.name_scope('input'), tf.device('/cpu:0'):\n if data_format == 'NCHW':\n bgr = tf.transpose(bgr, perm=[0,3,1,2])\n depth = tf.transpose(depth, perm=[0,3,1,2])\n blue, green, red = tf.split(bgr, 3, axis=maps_dim)\n #print(blue, 
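The `normalize_input` helper in this record swaps BGR to RGB and standardises with the dataset statistics defined at the top of the file, plus a scalar mean/std for the depth channel. The same transform in numpy (NHWC), for reference:

```python
import numpy as np

DATA_MEAN = np.array([75.2051479, 85.01498926, 75.08929598], np.float32)  # RGB
DATA_STD = np.array([46.89434904, 47.63335775, 46.47197535], np.float32)
DEPTH_MEAN, DEPTH_STD = 37.79630544, 29.21617326

bgr = np.zeros((2, 4, 4, 3), np.float32)      # toy NHWC batch
rgb = bgr[..., ::-1]                          # reorder BGR -> RGB
img = (rgb - DATA_MEAN) / DATA_STD            # per-channel standardisation
depth = (np.zeros((2, 4, 4, 1), np.float32) - DEPTH_MEAN) / DEPTH_STD
```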
green)\n #img = tf.concat([red, green, blue], 3)\n img = tf.concat([red, green, blue], maps_dim)\n if data_format == 'NCHW':\n mean = tf.constant(DATA_MEAN, dtype=tf.float32, shape=[1,3,1,1])\n std = tf.constant(DATA_STD, dtype=tf.float32, shape=[1,3,1,1])\n else:\n mean = DATA_MEAN\n std = DATA_STD\n #return (img - DATA_MEAN) / DATA_STD, depth - 33\n img = (img - mean) / std\n depth = (depth - DEPTH_MEAN) / DEPTH_STD\n return img, depth\n\n\ndef resize_tensor(net, shape, name):\n if data_format == 'NCHW':\n net = tf.transpose(net, perm=[0,2,3,1])\n net = tf.image.resize_bilinear(net, shape, name=name)\n if data_format == 'NCHW':\n net = tf.transpose(net, perm=[0,3,1,2])\n return net\n\n\ndef refine(net, skip_data):\n print(skip_data)\n skip_net = skip_data[0]\n num_layers = skip_data[1]\n growth = skip_data[2]\n block_name = skip_data[3]\n depth = skip_data[4]\n\n #size_top = top_layer.get_shape()[maps_dim].value\n #skip_width = skip_layer.get_shape()[2].value\n #if top_height != skip_height or top_width != skip_width:\n #print(top_height, skip_height)\n #assert(2*top_height == skip_height)\n \n #TODO try convolution2d_transpose\n #up_shape = tf.shape(skip_net)[height_dim:height_dim+2]\n with tf.variable_scope(block_name):\n up_shape = skip_net.get_shape().as_list()[height_dim:height_dim+2]\n net = resize_tensor(net, up_shape, name='upsample')\n print('\\nup = ', net)\n print('skip = ', skip_net)\n return dense_block_upsample(net, skip_net, depth, num_layers, growth, 'dense_block')\n\n\ndef BNReluConv(net, num_filters, name, k=3, rate=1, first=False, concat=None):\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n with tf.variable_scope(name):\n # TODO check this\n relu = None\n if not first:\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n relu = net\n if concat is not None:\n net = tf.concat([net, concat], maps_dim)\n print('c ', net)\n net = layers.conv2d(net, num_filters, kernel_size=k, rate=rate)\n return net\n\n\ndef layer(net, num_filters, name, is_training, first):\n with tf.variable_scope(name):\n net = BNReluConv(net, 4*num_filters, 'bottleneck', k=1, first=first)\n net = BNReluConv(net, num_filters, 'conv', k=3)\n #if use_dropout and is_training: \n # net = tf.nn.dropout(net, keep_prob=keep_prob)\n return net\n\n\ndef dense_block(net, size, growth, name, is_training=False, first=False, split=False):\n with tf.variable_scope(name):\n for i in range(size):\n x = net\n #net, first_relu = layer(net, k, 'layer'+str(i), is_training, first=first)\n net = layer(net, growth, 'layer'+str(i), is_training, first=first)\n net = tf.concat([x, net], maps_dim)\n if first:\n first = False\n if split and i == (size // 2) - 1:\n split_out = net\n print('Split shape = ', net)\n net = pool_func(net, 2, stride=2, padding='SAME', data_format=data_format)\n print('Dense block out: ', net)\n if split == True:\n return net, split_out\n return net\n\n\ndef dense_block_multigpu(net, size, growth, name, is_training=False, first=False, split=False):\n with tf.variable_scope(name):\n for i in range(size):\n if i < size//2:\n gpu = '/gpu:0'\n else:\n gpu = '/gpu:1'\n with tf.device(gpu):\n x = net\n #net, first_relu = layer(net, k, 'layer'+str(i), is_training, first=first)\n net = layer(net, growth, 'layer'+str(i), is_training, first=first)\n net = 
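Channel bookkeeping implied by `dense_block` and `transition` above, using the DenseNet-121 sizes configured in this record (`growth = 32`, `compression = 0.5`, `block_sizes = [6,12,24,16]`): each dense layer concatenates `growth` new maps, and each transition halves the running width.

```python
growth, compression = 32, 0.5
block_sizes = [6, 12, 24, 16]

channels = 2 * growth                      # stem conv0 emits 2*growth maps
for size in block_sizes[:-1]:
    channels += size * growth              # +growth per dense layer
    channels = int(round(channels * compression))   # 1x1 transition conv
channels += block_sizes[-1] * growth       # final block has no transition
print(channels)                            # 1024, the DenseNet-121 trunk width
```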
tf.concat([x, net], maps_dim)\n if first:\n first = False\n if split and i == (size // 2) - 1:\n split_out = net\n print('Split shape = ', net)\n net = pool_func(net, 2, stride=2, padding='SAME', data_format=data_format)\n print('Dense block out: ', net)\n if split == True:\n return net, split_out\n return net\n\n\ndef dense_block_upsample_new(net, size, growth, name):\n with tf.variable_scope(name):\n num_filters = net.get_shape().as_list()[maps_dim]\n #num_filters = int(round(num_filters*compression))\n num_filters = int(round(num_filters*compression/2))\n #num_filters = int(round(num_filters*0.3))\n # TODO try 3 vs 1\n net = BNReluConv(net, num_filters, 'bottleneck', k=1)\n #net = BNReluConv(net, num_filters, name+'_bottleneck', k=3)\n print('after bottleneck = ', net)\n for i in range(size):\n x = net\n net = BNReluConv(net, growth, 'layer'+str(i))\n net = tf.concat([x, net], maps_dim)\n return net\n #return dense_block(net, size, growth, name)\n\n\n# old refine\n##up_sizes = [128,128,512,512]\n#up_sizes = [256,256,512,512]\n#up_sizes = [196,256,384,512]\n#up_sizes = [256,256,384,512]\n#up_sizes = [128,128,256,256]\n#up_sizes = [128,128,256,512] # 2gpus\n#up_sizes = [128,256,384,512] # 2gpus\n#up_sizes = [128,256,384,512] # 2gpus\nup_sizes = [128,128,256,384,512] # 2gpus\ndef dense_block_upsample(net, skip_net, depth, size, growth, name):\n with tf.variable_scope(name):\n #net = tf.concat([net, skip_net], maps_dim)\n new_size = net.get_shape().as_list()[height_dim:height_dim+2]\n depth = resize_tensor(depth, new_size, 'resize_depth')\n net = tf.concat([net, skip_net, depth], maps_dim)\n num_filters = net.get_shape().as_list()[maps_dim]\n num_filters = int(round(num_filters*compression))\n #num_filters = int(round(num_filters*compression/2))\n #num_filters = int(round(num_filters*0.3))\n\n # TODO try 3 vs 1 -> 3 not helping\n net = BNReluConv(net, num_filters, 'bottleneck', k=1)\n #net = BNReluConv(net, num_filters, 'bottleneck', k=3)\n #net = tf.concat([net, depth], maps_dim)\n #net = BNReluConv(net, num_filters, 'bottleneck', k=3)\n print('after bottleneck = ', net)\n net = BNReluConv(net, size, 'layer')\n return net\n #return dense_block(net, size, growth, name)\n\n#up_sizes = [128,196,256,384]\n#up_sizes = [256,256,256,384]\n# 68.5 :/\n#def dense_block_upsample_oldrefine(net, skip_net, depth, size, growth, name):\n# with tf.variable_scope(name):\n# num_filters = net.get_shape().as_list()[maps_dim]\n# skip_net = BNReluConv(skip_net, num_filters, 'bottleneck', k=1)\n# net = tf.concat([net, skip_net], maps_dim)\n# #net = BNReluConv(net, num_filters, 'bottleneck', k=3)\n# print('after concat = ', net)\n# #net = BNReluConv(net, size, 'layer')\n# new_size = net.get_shape().as_list()[height_dim:height_dim+2]\n# depth = resize_tensor(depth, new_size, 'resize_depth')\n# net = BNReluConv(net, size, 'layer', concat=depth)\n# return net\n\ndef dense_block_upsample_tiramisu(net, size, growth, name):\n with tf.variable_scope(name):\n outputs = []\n for i in range(size):\n x = net\n net = BNReluConv(net, growth, 'layer'+str(i))\n outputs.append(net)\n if i < size - 1:\n net = tf.concat([x, net], maps_dim)\n net = tf.concat(outputs, maps_dim)\n return net\n\n\ndef transition(net, compression, name, stride=2):\n with tf.variable_scope(name):\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n skip_layer = net\n num_filters = net.get_shape().as_list()[maps_dim]\n num_filters = int(round(num_filters*compression))\n net = layers.conv2d(net, num_filters, kernel_size=1)\n # 
avg works little better on small res\n net = pool_func(net, 2, stride=stride, data_format=data_format, padding='SAME')\n print('Transition: ', net)\n return net, skip_layer\n\n\ndef _buildsingle(image, depth, is_training=False):\n #image = tf.Print(image, [tf.shape(image)], message='img_shape = ', summarize=10)\n bn_params['is_training'] = is_training\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n with tf.variable_scope('conv0'):\n net = layers.conv2d(image, 2*growth, 7, stride=2)\n #net = layers.conv2d(image, 2*growth, 7, stride=1)\n # TODO\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n\n #net = layers.max_pool2d(net, 2, stride=2, padding='SAME',\n # data_format=data_format, scope='pool0')\n net = layers.max_pool2d(net, 2, stride=2, padding='SAME',\n data_format=data_format, scope='pool0')\n\n #depth = resize_tensor(depth, tf.shape(net)[height_dim:height_dim+2],\n # name='resize_depth')\n #net = tf.concat([net, depth], maps_dim)\n skip_layers = []\n\n # no diff with double BN from orig densenet, first=True\n net = dense_block(net, block_sizes[0], growth, 'block0', is_training, first=True)\n #net, skip = dense_block(net, block_sizes[0], growth, 'block0', is_training,\n # first=True, split=True)\n #skip_layers.append([skip, 256, growth_up, 'block0_mid_refine', depth])\n skip_layers.append([net, up_sizes[0], growth_up, 'block0_refine', depth])\n net, skip = transition(net, compression, 'block0/transition')\n #skip_layers.append([skip, 2, k_up, 'block0_refine', depth])\n\n net = dense_block(net, block_sizes[1], growth, 'block1', is_training)\n skip_layers.append([net, up_sizes[1], growth_up, 'block1_refine', depth])\n #net, skip = dense_block(net, block_sizes[1], k, 'block1', is_training, split=True)\n #skip_layers.append([skip, km//2, 'block1_mid', depth])\n net, skip = transition(net, compression, 'block1/transition')\n #skip_layers.append([skip, 3, k_up, 'block1_refine', depth])\n #skip_layers.append([skip, km, 'block1', depth])\n\n # works the same with split, not 100%\n net, skip = dense_block(net, block_sizes[2], growth, 'block2', is_training, split=True)\n skip_layers.append([skip, up_sizes[2], growth_up, 'block2_mid_refine', depth])\n #net = dense_block(net, block_sizes[2], growth, 'block2', is_training)\n skip_layers.append([net, up_sizes[3], growth_up, 'block2_refine', depth])\n\n #skip_layers.append([net, km, 'block2', depth])\n net, skip = transition(net, compression, 'block2/transition')\n #skip_layers.append([skip, 4, k_up, 'block2_refine', depth])\n net = dense_block(net, block_sizes[3], growth, 'block3', is_training)\n #net, skip = dense_block(net, block_sizes[3], k, 'block3', is_training, split=True)\n #skip_layers.append([skip, km, 'block3', depth])\n\n with tf.variable_scope('head'):\n print('5x5')\n net = BNReluConv(net, context_size, 'context_conv', k=5)\n #print('7x7')\n #net = BNReluConv(net, context_size, 'context_conv', k=7)\n print('Before upsampling: ', net)\n mid_logits = net\n\n for skip_layer in reversed(skip_layers):\n net = refine(net, skip_layer)\n print('after upsampling = ', net)\n\n with tf.variable_scope('head'):\n with tf.variable_scope('logits'):\n net = tf.nn.relu(layers.batch_norm(net, **bn_params))\n logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None,\n 
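Structurally, `_buildsingle` is an encoder/decoder: on the way down it appends `(features, size, growth, name, depth)` entries to `skip_layers`, and on the way up it walks `reversed(skip_layers)`, upsampling and fusing through `refine`. The control flow, stripped of TensorFlow (the arithmetic merely stands in for real blocks):

```python
def encode(x, n_blocks):
    skips = []
    for _ in range(n_blocks):
        x = x * 2                  # stand-in for dense block + transition
        skips.append(x)
    return x, skips

def decode(x, skips):
    for skip in reversed(skips):   # same order as the refine loop above
        x = x + skip               # stand-in for upsample + concat + conv
    return x

x, skips = encode(1, 4)
print(decode(x, skips))            # 16 + 16 + 8 + 4 + 2 = 46
```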
data_format=data_format)\n\n with tf.variable_scope('mid_logits'):\n # dont forget bn and relu here\n mid_logits = tf.nn.relu(layers.batch_norm(mid_logits, **bn_params))\n mid_logits = layers.conv2d(mid_logits, FLAGS.num_classes, 1, activation_fn=None,\n data_format=data_format)\n\n if data_format == 'NCHW':\n logits = tf.transpose(logits, perm=[0,2,3,1])\n mid_logits = tf.transpose(mid_logits, perm=[0,2,3,1])\n input_shape = tf.shape(image)[height_dim:height_dim+2]\n logits = tf.image.resize_bilinear(logits, input_shape, name='resize_logits')\n mid_logits = tf.image.resize_bilinear(mid_logits, input_shape, name='resize_mid_logits')\n #if data_format == 'NCHW':\n # top_layer = tf.transpose(top_layer, perm=[0,3,1,2])\n return logits, mid_logits\n\ndef _build(image, depth, is_training=False):\n #image = tf.Print(image, [tf.shape(image)], message='img_shape = ', summarize=10)\n bn_params['is_training'] = is_training\n with arg_scope([layers.conv2d],\n data_format=data_format, stride=1, padding='SAME', activation_fn=None,\n normalizer_fn=None, normalizer_params=None,\n weights_initializer=init_func, biases_initializer=None,\n weights_regularizer=layers.l2_regularizer(weight_decay)):\n gpus = ['/gpu:0', '/gpu:1', '/gpu:2']\n gpu1 = '/gpu:0'\n gpu2 = '/gpu:1'\n #gpu3 = '/gpu:2'\n #with tf.device('/gpu:0'):\n with tf.device(gpus[0]): #bs=2\n with tf.variable_scope('conv0'):\n net = layers.conv2d(image, 2*growth, 7, stride=2)\n #net = layers.conv2d(image, 2*growth, 7, stride=1)\n # TODO\n net = tf.contrib.layers.batch_norm(net, **bn_params)\n net = tf.nn.relu(net)\n\n #net = layers.max_pool2d(net, 2, stride=2, padding='SAME',\n # data_format=data_format, scope='pool0')\n net = layers.max_pool2d(net, 2, stride=2, padding='SAME',\n data_format=data_format, scope='pool0')\n\n #depth = resize_tensor(depth, tf.shape(net)[height_dim:height_dim+2],\n # name='resize_depth')\n #net = tf.concat([net, depth], maps_dim)\n skip_layers = []\n\n # no diff with double BN from orig densenet, first=True\n net = dense_block(net, block_sizes[0], growth, 'block0', is_training, first=True)\n #net, skip = dense_block(net, block_sizes[0], growth, 'block0', is_training,\n # first=True, split=True)\n #skip_layers.append([skip, 256, growth_up, 'block0_mid_refine', depth])\n skip_layers.append([net, up_sizes[0], growth_up, 'block0_refine', depth])\n net, skip = transition(net, compression, 'block0/transition')\n #skip_layers.append([skip, 2, k_up, 'block0_refine', depth])\n\n #net = dense_block(net, block_sizes[1], growth, 'block1', is_training)\n #net = dense_block_multigpu(net, block_sizes[1], growth, 'block1', is_training)\n net = dense_block_multigpu(net, block_sizes[1], growth, 'block1', gpus[:2], 4, is_training)\n skip_layers.append([net, up_sizes[1], growth_up, 'block1_refine', depth])\n #net, skip = dense_block(net, block_sizes[1], k, 'block1', is_training, split=True)\n #skip_layers.append([skip, km//2, 'block1_mid', depth])\n with tf.device(gpu2):\n net, skip = transition(net, compression, 'block1/transition')\n #skip_layers.append([skip, 3, k_up, 'block1_refine', depth])\n #skip_layers.append([skip, km, 'block1', depth])\n\n # works the same with split, not 100%\n net, skip = dense_block(net, block_sizes[2], growth, 'block2', is_training, split=True)\n skip_layers.append([skip, up_sizes[2], growth_up, 'block2_mid_refine', depth])\n #net = dense_block(net, block_sizes[2], growth, 'block2', is_training)\n #net = dense_block(net, block_sizes[2], k, 'block2', is_training)\n skip_layers.append([net, up_sizes[3], 
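`_build` places halves of the network on different GPUs with `tf.device`. One caution: it calls `dense_block_multigpu(net, block_sizes[1], growth, 'block1', gpus[:2], 4, is_training)`, while the definition above takes `(net, size, growth, name, is_training, first, split)`, so `gpus[:2]` binds to `is_training`, `4` to `first`, and `is_training` to `split`, which makes the function return a tuple whenever `is_training` is true. The placement idea itself, as a minimal sketch on CPU devices so it runs anywhere:

```python
import tensorflow as tf

def toy_block(net, size, devices):
    # first half of the layers on devices[0], second half on devices[1]
    for i in range(size):
        with tf.device(devices[0] if i < size // 2 else devices[1]):
            net = net + 1.0        # stand-in for layer()
    return net

print(toy_block(tf.zeros([1, 8]), 4, ['/cpu:0', '/cpu:0']))
```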
growth_up, 'block2_refine', depth])\n\n #skip_layers.append([net, km, 'block2', depth])\n net, skip = transition(net, compression, 'block2/transition')\n #skip_layers.append([skip, 4, k_up, 'block2_refine', depth])\n #net = dense_block(net, block_sizes[3], growth, 'block3', is_training)\n net, skip = dense_block(net, block_sizes[3], growth, 'block3', is_training, split=True)\n skip_layers.append([skip, up_sizes[4], growth_up, 'block3_refine', depth])\n\n with tf.variable_scope('head'):\n #print('5x5')\n #net = BNReluConv(net, context_size, 'context_conv', k=5)\n print('7x7')\n net = BNReluConv(net, context_size, 'context_conv', k=7)\n print('Before upsampling: ', net)\n mid_logits = net\n\n for skip_layer in reversed(skip_layers):\n net = refine(net, skip_layer)\n print('after upsampling = ', net)\n\n with tf.device(gpu2), tf.variable_scope('head'):\n with tf.variable_scope('logits'):\n net = tf.nn.relu(layers.batch_norm(net, **bn_params))\n logits = layers.conv2d(net, FLAGS.num_classes, 1, activation_fn=None,\n data_format=data_format)\n\n with tf.variable_scope('mid_logits'):\n # dont forget bn and relu here\n mid_logits = tf.nn.relu(layers.batch_norm(mid_logits, **bn_params))\n mid_logits = layers.conv2d(mid_logits, FLAGS.num_classes, 1, activation_fn=None,\n data_format=data_format)\n\n if data_format == 'NCHW':\n logits = tf.transpose(logits, perm=[0,2,3,1])\n mid_logits = tf.transpose(mid_logits, perm=[0,2,3,1])\n input_shape = tf.shape(image)[height_dim:height_dim+2]\n logits = tf.image.resize_bilinear(logits, input_shape, name='resize_logits')\n mid_logits = tf.image.resize_bilinear(mid_logits, input_shape, name='resize_mid_logits')\n #if data_format == 'NCHW':\n # top_layer = tf.transpose(top_layer, perm=[0,3,1,2])\n return logits, mid_logits\n\n\ndef create_init_op(params):\n variables = tf.contrib.framework.get_variables()\n init_map = {}\n # clear head vars from imagenet\n remove_keys = []\n for key in params.keys():\n if 'head/' in key:\n print('delete ', key)\n remove_keys.append(key)\n for key in remove_keys:\n del params[key]\n\n for var in variables:\n name = var.name\n if name in params:\n #print(name, ' --> found init')\n #print(var)\n #print(params[name].shape)\n init_map[var.name] = params[name]\n del params[name]\n #else:\n # print(name, ' --> init not found!')\n print('Unused: ', list(params.keys()))\n init_op, init_feed = tf.contrib.framework.assign_from_values(init_map)\n return init_op, init_feed\n\n\ndef jitter(image, labels, depth):\n with tf.name_scope('jitter'), tf.device('/cpu:0'):\n print('\\nJittering enabled')\n global random_flip_tf, resize_width, resize_height\n #random_flip_tf = tf.placeholder(tf.bool, shape=(), name='random_flip')\n random_flip_tf = tf.placeholder(tf.bool, shape=(FLAGS.batch_size), name='random_flip')\n resize_width = tf.placeholder(tf.int32, shape=(), name='resize_width')\n resize_height = tf.placeholder(tf.int32, shape=(), name='resize_height')\n \n #image_split = tf.unstack(image, axis=0)\n #depth_split = tf.unstack(depth, axis=0)\n #weights_split = tf.unstack(weights, axis=0)\n #labels_split = tf.unstack(labels, axis=0)\n out_img = []\n out_depth = []\n #out_weights = []\n out_labels = []\n #image = tf.Print(image, [image[0]], message='img1 = ', summarize=10)\n for i in range(FLAGS.batch_size):\n out_img.append(tf.cond(random_flip_tf[i],\n lambda: tf.image.flip_left_right(image[i]), lambda: image[i]))\n #lambda: tf.image.flip_left_right(image_split[i]),\n #lambda: image_split[i]))\n out_depth.append(tf.cond(random_flip_tf[i],\n 
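`create_init_op` restores ImageNet weights by name: it first drops every pretrained entry under `head/` (those layers are trained from scratch), then pairs the remaining entries with graph variables of the same name and reports leftovers as unused. The matching logic in plain Python:

```python
pretrained = {'conv0/weights:0': 'w0', 'head/logits/weights:0': 'wh'}
graph_vars = ['conv0/weights:0', 'head/logits/weights:0']

pretrained = {k: v for k, v in pretrained.items() if 'head/' not in k}
init_map = {name: pretrained.pop(name) for name in graph_vars if name in pretrained}
print(init_map)    # {'conv0/weights:0': 'w0'}; head vars stay freshly initialised
print(pretrained)  # {}; anything left here would be reported as 'Unused'
```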
lambda: tf.image.flip_left_right(depth[i]),\n lambda: depth[i]))\n #print(cond_op)\n #image_split[i] = tf.assign(image_split[i], cond_op)\n #image[i] = tf.cond(random_flip_tf, lambda: tf.image.flip_left_right(image[i]),\n #cond_op = tf.cond(random_flip_tf, lambda: tf.image.flip_left_right(image[i]),\n #lambda: tf.identity(image[i]))\n #image[i] = tf.assign(image[i], cond_op)\n print(labels)\n out_labels.append(tf.cond(random_flip_tf[i], lambda: tf.image.flip_left_right(labels[i]),\n lambda: labels[i]))\n #out_weights.append(tf.cond(random_flip_tf[i], lambda: tf.image.flip_left_right(weights[i]),\n # lambda: weights[i]))\n image = tf.stack(out_img, axis=0)\n depth = tf.stack(out_depth, axis=0)\n #weights = tf.stack(out_weights, axis=0)\n labels = tf.stack(out_labels, axis=0)\n #image = tf.Print(image, [random_flip_tf], message='random_flip_tf = ', summarize=10)\n #image = tf.Print(image, [image[0]], message='img = ', summarize=10)\n\n # TODO\n #image = tf.image.resize_bicubic(image, [resize_height, resize_width])\n #depth = tf.image.resize_bilinear(depth, [resize_height, resize_width])\n #labels = tf.image.resize_nearest_neighbor(labels, [resize_height, resize_width])\n #weights = tf.image.resize_nearest_neighbor(weights, [resize_height, resize_width])\n #return image, labels, weights, depth\n return image, labels, depth\n\ndef _get_train_feed():\n global random_flip_tf, resize_width, resize_height\n #random_flip = int(np.random.choice(2, 1))\n random_flip = np.random.choice(2, FLAGS.batch_size).astype(np.bool)\n #resize_scale = np.random.uniform(0.5, 2)\n #resize_scale = np.random.uniform(0.4, 1.5)\n #resize_scale = np.random.uniform(0.5, 1.2)\n min_resize = 0.7\n max_resize = 1.3\n if train_step_iter == 0:\n resize_scale = max_resize\n else:\n resize_scale = np.random.uniform(min_resize, max_resize)\n width = np.int32(int(round(FLAGS.img_width * resize_scale)))\n height = np.int32(int(round(FLAGS.img_height * resize_scale)))\n feed_dict = {random_flip_tf:random_flip, resize_width:width, resize_height:height}\n return feed_dict\n\n\ndef build(dataset, is_training, reuse=False):\n with tf.variable_scope('', reuse=reuse):\n x, labels, num_labels, class_hist, depth, img_names = \\\n reader.inputs(dataset, is_training=is_training, num_epochs=FLAGS.max_epochs)\n if is_training and apply_jitter:\n x, labels, depth = jitter(x, labels, depth)\n x, depth = normalize_input(x, depth)\n\n #logits = _build(x, depth, is_training)\n #total_loss = _loss(logits, labels, weights, is_training)\n logits, mid_logits = _build(x, depth, is_training)\n total_loss = _multiloss(logits, mid_logits, labels, num_labels, class_hist, is_training)\n\n if is_training and imagenet_init:\n init_path = init_dir + 'dense_net_' + str(model_depth) + '.pickle'\n with open(init_path, 'rb') as f:\n init_map = pickle.load(f)\n init_op, init_feed = create_init_op(init_map)\n else:\n init_op, init_feed = None, None\n run_ops = [total_loss, logits, labels, img_names]\n if is_training:\n return run_ops, init_op, init_feed\n else:\n return run_ops\n\ndef inference(image, constant_shape=True):\n x = normalize_input(image)\n logits, mid_logits = _build(x, is_training=False)\n return logits, mid_logits\n\n\n#def _multiloss(logits, mid_logits, labels, weights, is_training=True):\ndef _multiloss(logits, mid_logits, labels, num_labels, class_hist, is_training):\n with tf.device(loss_gpu):\n max_weight = FLAGS.max_weight\n #max_weight = 10\n #max_weight = 50\n loss1 = losses.weighted_cross_entropy_loss(\n logits, labels, num_labels, 
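The `_multiloss` here implements deep supervision: the full-resolution head and the pre-upsampling `mid_logits` each get a weighted cross entropy, combined as a convex blend with the auxiliary weight `wgt = 0.3` (the value the comments mark as best). Isolated:

```python
def combine_losses(loss_main, loss_aux, wgt=0.3):
    # xent_loss = (1 - wgt) * loss1 + wgt * loss2
    return (1 - wgt) * loss_main + wgt * loss_aux

assert abs(combine_losses(2.0, 1.0) - (0.7 * 2.0 + 0.3 * 1.0)) < 1e-9
```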
class_hist, max_weight=max_weight)\n loss2 = losses.weighted_cross_entropy_loss(\n mid_logits, labels, num_labels, class_hist, max_weight=max_weight)\n\n #loss1 = losses.weighted_cross_entropy_loss(logits, labels, weights,\n # max_weight=max_weight)\n #loss2 = losses.weighted_cross_entropy_loss(mid_logits, labels, weights,\n # max_weight=max_weight)\n #wgt = 0.4\n #xent_loss = loss1 + wgt * loss2\n wgt = 0.3 # best\n #wgt = 0.2\n #wgt = 0.4\n\n xent_loss = (1-wgt)*loss1 + wgt*loss2\n #xent_loss = loss1\n\n all_losses = [xent_loss]\n # get losses + regularization\n total_loss = losses.total_loss_sum(all_losses)\n if is_training:\n loss_averages_op = losses.add_loss_summaries(total_loss)\n with tf.control_dependencies([loss_averages_op]):\n total_loss = tf.identity(total_loss)\n\n return total_loss\n\n\ndef minimize(loss, global_step, num_batches):\n # Calculate the learning rate schedule.\n decay_steps = int(num_batches * FLAGS.num_epochs_per_decay)\n # Decay the learning rate exponentially based on the number of steps.\n global lr\n #base_lr = 1e-2 # for sgd\n base_lr = FLAGS.initial_learning_rate\n stairs = True\n #stairs = False\n fine_lr_div = 5\n print('fine_lr = base_lr / ', fine_lr_div)\n #lr_fine = tf.train.exponential_decay(base_lr / 10, global_step, decay_steps,\n #lr_fine = tf.train.exponential_decay(base_lr / 20, global_step, decay_steps,\n lr_fine = tf.train.exponential_decay(base_lr / fine_lr_div, global_step, decay_steps,\n FLAGS.learning_rate_decay_factor, staircase=stairs)\n lr = tf.train.exponential_decay(base_lr, global_step, decay_steps,\n FLAGS.learning_rate_decay_factor, staircase=stairs)\n tf.summary.scalar('learning_rate', lr)\n # adam works much better here!\n if imagenet_init:\n opts = [tf.train.AdamOptimizer(lr_fine), tf.train.AdamOptimizer(lr)]\n return train_helper.minimize_fine_tune(opts, loss, global_step, 'head')\n else:\n opt = tf.train.AdamOptimizer(lr)\n return train_helper.minimize(opt, loss, global_step)\n #opts = [tf.train.RMSPropOptimizer(lr_fine, momentum=0.9, centered=True),\n # tf.train.RMSPropOptimizer(lr, momentum=0.9, centered=True)]\n #opts = [tf.train.MomentumOptimizer(lr_fine, 0.9), tf.train.MomentumOptimizer(lr, 0.9)]\n\n\n\ndef train_step(sess, run_ops):\n global train_step_iter\n if apply_jitter:\n feed_dict = _get_train_feed()\n vals = sess.run(run_ops, feed_dict=feed_dict)\n else:\n vals = sess.run(run_ops)\n train_step_iter += 1\n return vals\n\n\ndef num_batches(dataset):\n return dataset.num_examples() // FLAGS.batch_size\n","sub_path":"OLD/models/cityscapes/tmp/model_76.py","file_name":"model_76.py","file_ext":"py","file_size_in_byte":29509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"132640442","text":"import random\nimport sys\nimport os\nimport json\nimport time\nimport spacy\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\nfrom nlunetwork import data\nfrom nlunetwork.model import Model\nfrom nlunetwork import metrics\nfrom nlunetwork import runtime_model\n\n# embedding size for labels\nembedding_size = int(os.environ.get('LABEL_EMB_SIZE', 64))\n# size of LSTM cells\nhidden_size = int(os.environ.get('LSTM_SIZE', 100))\n# size of batch\nbatch_size = int(os.environ.get('BATCH_SIZE', 16))\n# number of training epochs\nepoch_num = int(os.environ.get('MAX_EPOCHS', 100))\n\nMY_PATH = os.path.dirname(os.path.abspath(__file__))\n\nOUTPUT_FOLDER = os.environ.get('OUTPUT_FOLDER', 
'')\nDATASET = os.environ.get('DATASET', 'huric_eb/modern_right')\n# possible MODE:\n# - 'dev_cross' that excludes the last fold and performs (k-1)-fold, last fold untouched\n# - 'cross' that performs k-fold\n# - 'eval' that does the train on k-1 and test on last (untouched fold)\n# - 'train_all' trains the network on all the folds\n# - 'test' takes a pretrained model (path default or `MODEL_PATH`) and runs it on the specified samples (default or `TEST_PATH`)\n# - 'test_all' takes a pretrained model (path default or `MODEL_PATH`) and runs it on the specified samples (default or `TEST_PATH`)\nMODE = os.environ.get('MODE', 'dev_cross')\nOUTPUT_FOLDER += MODE\n\n# specific to test mode\nMODEL_PATH = os.environ.get('MODEL_PATH', 'nlunetwork/results/framenet/results/train_all_loss_both_slottype_full_we_large_recurrent_cell_lstm_attention_both_three_stages_true_highway___hyper:LABEL_EMB_SIZE=64,LSTM_SIZE=128,BATCH_SIZE=2,MAX_EPOCHS=100/framenet/subset_both/')\n\n\n# the type of recurrent unit on the multi-turn: rnn or CRF\nRECURRENT_MULTITURN=os.environ.get('RECURRENT_MULTITURN','gru')\n\n# set this to 'no_all', 'no_bot_turn', 'no_previous_intent' for a partial single-turned net on multi-turn datasets\nFORCE_SINGLE_TURN = os.environ.get('FORCE_SINGLE_TURN', False)\nif FORCE_SINGLE_TURN:\n OUTPUT_FOLDER += '_single_' + FORCE_SINGLE_TURN\nif RECURRENT_MULTITURN != 'gru':\n OUTPUT_FOLDER += '_' + RECURRENT_MULTITURN\n\nLOSS_SUM = os.environ.get('LOSS_SUM', 'both') # 'both' if want to reduce loss of both intents and slots, otherwise 'intent' or 'slots'\nOUTPUT_FOLDER += '_loss_' + LOSS_SUM\nSLOTS_TYPE = os.environ.get('SLOTS_TYPE', 'full') # what part of the slots to consider: 'full': B-Location, 'iob_only': B (corresponds to only boundary detection), 'slot_only': Location\nOUTPUT_FOLDER += '_slottype_' + SLOTS_TYPE\nWORD_EMBEDDINGS = os.environ.get('WORD_EMBEDDINGS', 'large')\nOUTPUT_FOLDER += '_we_' + WORD_EMBEDDINGS\nRECURRENT_CELL = os.environ.get('RECURRENT_CELL', 'lstm')\nOUTPUT_FOLDER += '_recurrent_cell_' + RECURRENT_CELL\nATTENTION = os.environ.get('ATTENTION', 'both') # intents, slots, both, none\nOUTPUT_FOLDER += '_attention_' + ATTENTION\nTHREE_STAGES = os.environ.get('THREE_STAGES', 'true_highway') # add Boundary Detection intermediate level. 
Can be False, True or truish with 'highway' inside\nif THREE_STAGES.lower() in ('false', 'no', '0'):\n THREE_STAGES = False\nOUTPUT_FOLDER += '_three_stages_{}'.format(THREE_STAGES)\nINTENT_EXTRACTION_MODE = os.environ.get('INTENT_EXTRACTION_MODE', 'bi-rnn') # intent comes out of bi-rnn or only a weighted mean (attention intent must be turned on)\nif INTENT_EXTRACTION_MODE != 'bi-rnn':\n OUTPUT_FOLDER += '_intentextraction_' + INTENT_EXTRACTION_MODE\n\n\n# hyperparams\nOUTPUT_FOLDER += '___hyper:LABEL_EMB_SIZE={},LSTM_SIZE={},BATCH_SIZE={},MAX_EPOCHS={}'.format(embedding_size, hidden_size, batch_size, epoch_num)\n\nprint('environment variables:')\nprint('DATASET:', DATASET, '\\nOUTPUT_FOLDER:', OUTPUT_FOLDER, '\\nMODE:', MODE, '\\nRECURRENT_MULTITURN:', RECURRENT_MULTITURN, '\\nFORCE_SINGLE_TURN:', FORCE_SINGLE_TURN, '\\nWORD_EMBEDDINGS:', WORD_EMBEDDINGS, '\\nRECURRENT_CELL:', RECURRENT_CELL, '\\nATTENTION:', ATTENTION)\n\ndef get_model(vocabs, tokenizer, language, multi_turn, input_steps, nlp):\n model = Model(input_steps, embedding_size, hidden_size, vocabs, WORD_EMBEDDINGS, RECURRENT_CELL, ATTENTION, LOSS_SUM, multi_turn, None, RECURRENT_MULTITURN, THREE_STAGES, INTENT_EXTRACTION_MODE)\n model.build(nlp, tokenizer, language)\n return model\n\n\ndef train(mode):\n global epoch_num\n # maximum length of sentences\n input_steps = 100\n # load the train and dev datasets\n folds = data.load_data(DATASET, SLOTS_TYPE)\n # preprocess them to list of training/test samples\n # a sample is made up of a tuple that contains\n # - an input sentence (list of words --> strings, padded)\n # - the real length of the sentence (int) to be able to recognize padding\n # - an output sequence (list of IOB annotations --> strings, padded)\n # - an output intent (string)\n multi_turn = folds[0]['meta'].get('multi_turn', False)\n print('multi_turn:', multi_turn)\n if multi_turn:\n input_steps *=2\n folds = [data.collapse_multi_turn_sessions(fold, FORCE_SINGLE_TURN) for fold in folds]\n folds = [data.adjust_sequences(fold, input_steps) for fold in folds]\n\n all_samples = [s for fold in folds for s in fold['data']]\n meta_data = folds[0]['meta']\n\n\n # turn off multi_turn for the required additional feeds and previous intent RNN\n if multi_turn and FORCE_SINGLE_TURN == 'no_all' or FORCE_SINGLE_TURN == 'no_previous_intent':\n multi_turn = False\n # get the vocabularies for input, slot and intent\n vocabs = data.get_vocabularies(all_samples, meta_data)\n # and get the model\n if FORCE_SINGLE_TURN == 'no_previous_intent':\n # changing this now, implies that the model doesn't have previous intent\n multi_turn = False\n\n language_model_name = data.get_language_model_name(meta_data['language'], WORD_EMBEDDINGS)\n nlp = spacy.load(language_model_name)\n\n real_folder = MY_PATH + '/results/' + OUTPUT_FOLDER + '/' + DATASET\n if not os.path.exists(real_folder):\n os.makedirs(real_folder)\n\n create_empty_array = lambda: np.zeros((epoch_num, ))\n\n train_folds = []\n test_folds = []\n if mode == 'dev_cross':\n # cross on 1...k-1\n folds = folds[:-1]\n if mode == 'cross' or mode == 'dev_cross':\n for fold_number in range(0, len(folds)):\n train = [s for (count,fold) in enumerate(folds) if count != fold_number for s in fold['data']]\n test = folds[fold_number]['data']\n train_folds.append(train)\n test_folds.append(test)\n elif mode == 'eval':\n # train on 1...k-1, test on k\n train_folds.append([s for (count,fold) in enumerate(folds[:-1]) for s in fold['data']])\n test_folds.append(folds[-1]['data'])\n elif mode == 
'train_all':\n train_folds.append([s for (count,fold) in enumerate(folds) for s in fold['data']])\n test_folds.append([])\n elif mode.startswith('test'):\n train_folds.append([])\n if mode == 'test':\n test_folds.append(folds[-1]['data'])\n elif mode == 'test_all':\n test_folds.append([s for (count,fold) in enumerate(folds) for s in fold['data']])\n else:\n raise ValueError('invalid mode')\n\n for fold_number, (training_samples, test_samples) in enumerate(zip(train_folds, test_folds)):\n # reset the graph for next iteration\n tf.reset_default_graph()\n # fix the random seeds\n random_seed_init(len(folds[0]['data']))\n\n print('train samples', len(training_samples))\n if test_samples:\n print('test samples', len(test_samples))\n\n if mode.startswith('test'):\n # restore a model\n model, sess = restore_graph(MODEL_PATH, nlp)\n epoch_num = 1\n else:\n model, sess = build_graph(nlp, vocabs, meta_data, multi_turn, input_steps)\n\n for epoch in range(epoch_num):\n print('epoch {}/{}'.format(epoch + 1, epoch_num))\n #mean_loss = 0.0\n #train_loss = 0.0\n if not mode.startswith('test'):\n for i, batch in tqdm(enumerate(data.get_batch(batch_size, training_samples)), total=len(training_samples)//batch_size):\n # perform a batch of training\n #print(batch)\n #_, loss, bd_prediction, decoder_prediction, intent, mask = model.step(sess, 'train', batch)\n model.step(sess, 'train', batch)\n\n if test_samples:\n test_epoch(model, sess, test_samples, fold_number, real_folder, epoch, input_steps)\n\n if not mode.startswith('test'):\n # the iteration on the fold has completed\n # save the model\n saver = tf.train.Saver()\n saver.save(sess, '{}/model_fold_{}.ckpt'.format(real_folder, fold_number))\n\n if test_samples:\n print('computing the metrics for all epochs on all the folds merged')\n\n # initialize the history that will collect some measures\n history = defaultdict(lambda: defaultdict(create_empty_array))\n for epoch in range(epoch_num):\n json_fold_location = '{}/json/epoch_{}'.format(real_folder, epoch)\n merged_predicitons = data.merge_prediction_folds(json_fold_location)\n data.save_predictions('{}/json/epoch_{}'.format(real_folder, epoch), 'full', merged_predicitons)\n epoch_metrics = metrics.evaluate_epoch(merged_predicitons)\n save_file(epoch_metrics, '{}/scores'.format(real_folder), 'epoch_{}.json'.format(epoch))\n for key, measures in epoch_metrics.items():\n if isinstance(measures, dict):\n for measure_name, value in measures.items():\n history[key][measure_name][epoch] = value\n\n print('averages over the K folds have been computed')\n\n to_plot_precision = {output_type: values['precision'] for output_type, values in history.items()}\n to_plot_recall = {output_type: values['recall'] for output_type, values in history.items()}\n to_plot_f1 = {output_type: values['f1'] for output_type, values in history.items()}\n metrics.plot_history('{}/f1.png'.format(real_folder) , to_plot_f1)\n save_file(history, real_folder, 'history_full.json')\n\ndef build_graph(nlp, vocabs, meta_data, multi_turn, input_steps):\n \"\"\"Builds the computational graph\"\"\"\n model = get_model(vocabs, meta_data['tokenizer'], meta_data['language'], multi_turn, input_steps, nlp)\n\n global_init_op = tf.global_variables_initializer()\n table_init_op = tf.tables_initializer()\n sess = tf.Session()\n\n # initialize the required parameters\n sess.run(global_init_op)\n sess.run(table_init_op)\n\n return model, sess\n\ndef restore_graph(model_path, nlp):\n \"\"\"Restores the stored computational graph, together with the optimized 
weights\"\"\"\n model = runtime_model.RuntimeModel(model_path, 300, 'en', nlp)\n\n return model, model.sess\n\ndef train_epoch(model, data):\n \"\"\"Perform an epoch of training\"\"\"\n pass # TODO\n\ndef test_epoch(model, sess, test_samples, fold_number, real_folder, epoch, input_steps):\n \"\"\"Perform an epoch of testing\"\"\"\n if fold_number == 0:\n # copy just on the first fold, avoid overwriting\n data.copy_huric_xml_to('{}/xml/epoch_{}'.format(real_folder, epoch))\n\n predicted = []\n for j, batch in tqdm(enumerate(data.get_batch(batch_size, test_samples)), total=len(test_samples)//batch_size):\n results = model.step(sess, 'test', batch)\n intent = results['intent']\n intent_attentions = results['intent_attentions']\n if THREE_STAGES:\n bd_prediction = results['bd']\n bd_prediction = np.transpose(bd_prediction, [1, 0])\n ac_prediction = results['ac']\n ac_prediction = np.transpose(ac_prediction, [1, 0])\n # all the attention matrices are in shape (time, batch, time)\n bd_attentions = results['bd_attentions']\n bd_attentions = np.transpose(bd_attentions, [1, 0, 2])\n ac_attentions = results['ac_attentions']\n ac_attentions = np.transpose(ac_attentions, [1, 0, 2])\n #print('bd_attentions.shape', bd_attentions.shape)\n decoder_prediction = np.array([data.rebuild_slots_sequence(bd_seq, ac_seq) for bd_seq, ac_seq in zip(bd_prediction, ac_prediction)])\n slots_attentions = np.zeros((len(batch), input_steps, input_steps))\n else:\n decoder_prediction = results['slots']\n # from time-major matrix to sample-major\n decoder_prediction = np.transpose(decoder_prediction, [1, 0])\n slots_attentions = results['slots_attentions']\n slots_attentions = np.transpose(slots_attentions, [1, 0, 2])\n bd_attentions = np.zeros((len(batch), input_steps, input_steps))\n ac_attentions = np.zeros((len(batch), input_steps, input_steps))\n\n #print(results)\n predicted_batch = metrics.clean_predictions(decoder_prediction, intent, batch, intent_attentions, bd_attentions, ac_attentions, slots_attentions)\n if DATASET == 'huric':\n data.huric_add_json('{}/xml/epoch_{}'.format(real_folder, epoch), predicted_batch)\n predicted.extend(predicted_batch)\n if j == 0:\n index = random.choice(range(len(batch)))\n # index = 0\n print('Input Sentence :', batch[index]['words'][:batch[index]['length']])\n if THREE_STAGES:\n print('BD Truth :', batch[index]['boundaries'][:batch[index]['length']])\n print('BD Prediction :', bd_prediction[index][:batch[index]['length']].tolist())\n print('AC Truth :', batch[index]['types'][:batch[index]['length']])\n print('AC Prediction :', ac_prediction[index][:batch[index]['length']].tolist())\n #print('BD atts', bd_attentions[index])\n #print('AC atts', ac_attentions[index])\n print('Slot Truth :', batch[index]['slots'][:batch[index]['length']])\n print('Slot Prediction :', decoder_prediction[index][:batch[index]['length']].tolist())\n print('Intent Truth :', batch[index]['intent'])\n print('Intent Prediction :', intent[index])\n print('Intent atts :', intent_attentions[index][:batch[index]['length']])\n\n\n data.save_predictions('{}/json/epoch_{}'.format(real_folder, epoch), fold_number + 1, predicted)\n # epoch resume\n print('epoch {}/{} on fold {}'.format(epoch + 1, epoch_num, fold_number + 1))\n performance = metrics.evaluate_epoch(predicted)\n for metric_name, value in performance.items():\n print('%20s' % metric_name, value)\n\ndef random_seed_init(seed):\n random.seed(seed)\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\ndef save_file(file_content, file_path, file_name):\n if 
not os.path.exists(file_path):\n os.makedirs(file_path)\n with open('{}/{}'.format(file_path, file_name) , 'w') as out_file:\n json.dump(file_content, out_file, indent=2, cls=data.NumpyEncoder)\n\n\nif __name__ == '__main__':\n train(MODE)\n","sub_path":"nlunetwork/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"291398342","text":"def fibo_recur(n):\n\n if n == 0:\n return 0\n \n if n == 1:\n return 1\n\n if n == 2:\n return 1\n\n return fibo_recur(n-1) + fibo_recur(n-2)\n\n\ndef fibo_dp(n, dp=dict()):\n\n if n == 0:\n return 0\n\n if n == 1 or n == 2:\n return 1\n\n if n in dp:\n return dp[n]\n\n dp[n] = fibo_dp(n-1, dp) + fibo_dp(n-2, dp)\n\n return dp[n]\n\na = int(input())\nprint(fibo_dp(a))\nprint(fibo_recur(a))\n\n","sub_path":"fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"421317608","text":"import os\nimport re\nimport sys\nimport gzip\nimport json\nimport codecs\nimport pyBigWig\nimport subprocess\nimport numpy as np\nimport pandas as pd\nfrom pyfaidx import Fasta\nfrom scipy.stats import spearmanr\nimport optparse\n\nparser = optparse.OptionParser()\nparser.add_option('--targets',\n action=\"store\", dest=\"targets\",\n help=\"targets\", default=None)\noptions, args = parser.parse_args()\n\ntargets = options.targets.split(',')\n\nlibrary = {\n \"ets1\": \"GSE97793_Combined_ets1_100nM_elk1_100nM_50nM_gabpa_100nM_log.xlsx\",\n \"elk1\": \"GSE97793_Combined_ets1_100nM_elk1_100nM_50nM_gabpa_100nM_log.xlsx\",\n \"gabpa\": \"GSE97793_Combined_ets1_100nM_elk1_100nM_50nM_gabpa_100nM_log.xlsx\",\n \"e2f1\": \"GSE97886_Combined_E2f1_200nM_250nM_E2f3_250nM_E2f4_500nM_800nM_log.xlsx\",\n \"e2f3\": \"GSE97886_Combined_E2f1_200nM_250nM_E2f3_250nM_E2f4_500nM_800nM_log.xlsx\",\n \"e2f4\": \"GSE97886_Combined_E2f1_200nM_250nM_E2f3_250nM_E2f4_500nM_800nM_log.xlsx\",\n \"max\": \"GSE97885_Combined_Max_Myc_Mad_Mad_r_log.xlsx\",\n \"mxi\": \"GSE97885_Combined_Max_Myc_Mad_Mad_r_log.xlsx\",\n \"myc\": \"GSE97885_Combined_Max_Myc_Mad_Mad_r_log.xlsx\",\n \"runx1\": \"GSE97691_Combined_Runx1_10nM_50nM_Runx2_10nM_50nM_log.xlsx\",\n \"runx2\": \"GSE97691_Combined_Runx1_10nM_50nM_Runx2_10nM_50nM_log.xlsx\"\n}\n\ncolumn = {\n \"ets1\": \"Ets1_100nM\",\n \"elk1\": \"Elk1_50nM\",\n \"gabpa\": \"Gabpa_100nM\",\n \"e2f1\": \"E2f1_250nM\",\n \"e2f3\": \"E2f3_250nM\",\n \"e2f4\": \"E2f4_500nM\",\n \"max\": \"Max\",\n \"mxi\": \"Mad_r\",\n \"myc\": \"Myc\",\n \"runx1\": \"Runx1_50nM\",\n \"runx2\": \"Runx2_50nM\"\n}\n\nfastapath = \"/users/amr1/pho4/data/genome/hg38/hg38.genome.fa\"\nGenomeDict={}\nsequence=''\ninputdatafile = open(fastapath)\nfor line in inputdatafile:\n if line[0]=='>':\n if sequence != '':\n GenomeDict[chrm] = ''.join(sequence)\n chrm = line.strip().split('>')[1]\n sequence=[]\n Keep=False\n continue\n else:\n sequence.append(line.strip())\nGenomeDict[chrm] = ''.join(sequence)\n\nfor target in targets:\n key = target.split('_')[0]\n dfs = pd.read_excel(\"/users/amr1/pho4/data/experimental/gcPBM/\"+library[key])\n all_xvals = dfs[column[key]]\n probes = dfs['Sequence']\n seqToDdg = {}\n for idx,probe in enumerate(probes):\n seqToDdg[probe] = all_xvals[idx]\n tfToBigWigs = (pyBigWig.open(\"/users/amr1/pho4/data/gcpbm/\"+target+\"/basename_prefix.pooled.positive.bigwig\"),\n 
pyBigWig.open(\"/users/amr1/pho4/data/gcpbm/\"+target+\"/basename_prefix.pooled.negative.bigwig\"))\n \n seqToCoord = {}\n with gzip.open('/users/amr1/pho4/data/gcpbm/'+target+'/idr.optimal_peak.narrowPeak.gz', 'rt') as inp:\n for line in inp:\n chrm = line.strip().split('\\t')[0]\n start = int(line.strip().split('\\t')[1])-200\n end = int(line.strip().split('\\t')[2])+200\n sequence = GenomeDict[chrm][start:end].upper()\n for probe in probes:\n loc = sequence.find(probe)\n if loc != -1:\n if probe not in seqToCoord:\n seqToCoord[probe] = []\n seqToCoord[probe].append((chrm, start+loc))\n\n print(\"Number of probes in peaks: \", len(seqToCoord.keys()), \", total number of probes: \", len(probes))\n \n xvals = []\n for motif in seqToCoord.keys():\n xvals.append(seqToDdg[motif])\n\n seq_len = 201\n posFootprint = {}\n negFootprint = {}\n for motif in seqToCoord.keys():\n currentPosCounts= []\n currentNegCounts = []\n for chrm, motif_start in seqToCoord[motif]: \n if \"_\" in chrm: continue\n center = motif_start+18\n start = int(center-(seq_len/2))\n end = int(center+(seq_len/2))\n posvals = np.array(tfToBigWigs[0].values(chrm, start, end))\n where_are_NaNs = np.isnan(posvals)\n posvals[where_are_NaNs] = 0.0\n currentPosCounts.append(posvals)\n negvals = np.array(tfToBigWigs[1].values(chrm, start, end))\n where_are_NaNs = np.isnan(negvals)\n negvals[where_are_NaNs] = 0.0\n currentNegCounts.append(negvals)\n posFootprint[motif] = np.mean(np.array(currentPosCounts), axis = 0)\n negFootprint[motif] = np.mean(np.array(currentNegCounts), axis = 0)\n \n window_sizes = [18, 36, 48, 64, 128, 150, 200]\n best_size = -1\n best_val = -1\n for window in window_sizes:\n start = int((seq_len/2)-(window/2))\n end = int((seq_len/2)+(window/2))\n yvals = []\n for flank in seqToCoord.keys():\n yvals.append(np.sum(posFootprint[flank][start:end]+ \\\n negFootprint[flank][start:end]))\n val = spearmanr(xvals, yvals)[0]\n if val > best_val:\n best_val = val\n best_size = window\n \n print(\"target: \", target, \", spearmanr: \", best_val, \", best window: \", best_size)","sub_path":"old_experiments/gcPBM/raw_count_targets.py","file_name":"raw_count_targets.py","file_ext":"py","file_size_in_byte":4916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"498143931","text":"import apiai\nimport json\n\nclass convAPI:\n# def __init__(self, jsonData):\n# self.jsonData = jsonData\n\n def GetIntent(self,msg,sessionID):\n #ai = apiai.ApiAI('390bfd55dc8c4a979998b32b8e884c64')\n ai = apiai.ApiAI('f721b67ed617427d9c30e0efb8977750')\n self.request = ai.text_request()\n self.request.session_id = sessionID\n self.request.query = msg\n resp = self.request.getresponse()\n json_resp_str = str(resp.read().decode(\"utf-8\"))\n data = json.loads(json_resp_str)\n result = data['result']\n sessionID = data['sessionId']\n metadata = result['metadata']\n intent = metadata['intentName']\n confidence = result['score']\n response = result['fulfillment']['speech']\n contexts = result['contexts']\n retState = None\n if contexts: retState = contexts[0]['name']\n userInput = result['resolvedQuery']\n retResponse = 'sessionID:'+str(sessionID)+' '\n retResponse += 'userInput:'+userInput+' '\n retResponse += 'intent:'+intent+' '\n retResponse += 'confidence:'+str(confidence)+' '\n retResponse += 'contexts:'+str(contexts)+' '\n retResponse += (' ' + response)\n return retResponse, 
retState\n","sub_path":"convAPI.py","file_name":"convAPI.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"28323356","text":"# Plot all columns as subplots\ndf.plot(subplots=True)\nplt.show()\n\n# Plot just the Dew Point data\ncolumn_list1 = ['Dew Point (deg F)']\ndf[column_list1].plot()\nplt.show()\n\n# Plot the Dew Point and Temperature data, but not the Pressure data\ncolumn_list2 = ['Temperature (deg F)','Dew Point (deg F)']\ndf[column_list2].plot()\nplt.show()\n############## axes ##############\n# Create plot axes for the first line plot\nplt.axes([0.05, 0.05, 0.425, 0.9]) #plt.axes([xlo, ylo, width, height])\n# Plot in blue the % of degrees awarded to women in the Physical Sciences\nplt.plot(year, physical_sciences, color='blue')\n\n# Create plot axes for the second line plot\nplt.axes([0.525, 0.05, 0.425, 0.9]) #plt.axes([xlo, ylo, width, height])\n# Plot in red the % of degrees awarded to women in Computer Science\nplt.plot(year, computer_science, color='red')\n\n############## subplot(nr_rows, nr_cols, nr_subplot) ##############\n# Create a figure with 2x2 subplot layout and make the top left subplot active\nplt.subplot(2,2,1)\n\n# Plot in blue the % of degrees awarded to women in the Physical Sciences\nplt.plot(year, physical_sciences, color='blue')\nplt.title('Physical Sciences')\n\n# Make the top right subplot active in the current 2x2 subplot grid \nplt.subplot(2,2,2)\n\n# Plot in red the % of degrees awarded to women in Computer Science\nplt.plot(year, computer_science, color='red')\nplt.title('Computer Science')\n\n# Make the bottom left subplot active in the current 2x2 subplot grid\nplt.subplot(2,2,3)\n\n# Plot in green the % of degrees awarded to women in Health Professions\nplt.plot(year, health, color='green')\nplt.title('Health Professions')\n\n# Make the bottom right subplot active in the current 2x2 subplot grid\nplt.subplot(2,2,4)\n\n# Plot in yellow the % of degrees awarded to women in Education\nplt.plot(year, education, color='yellow')\nplt.title('Education')\n\n# Improve the spacing between subplots and display them\nplt.tight_layout()\nplt.show()\n\n################ Control axis extents (limit) ################\n##### axis, xlim, ylim\n# Plot the % of degrees awarded to women in Computer Science and the Physical Sciences\nplt.plot(year,computer_science, color='red') \nplt.plot(year, physical_sciences, color='blue')\n\n# Add the axis labels\nplt.xlabel('Year')\nplt.ylabel('Degrees awarded to women (%)')\n# Set the x-axis range\nplt.xlim(1990,2010)\n# Set the y-axis range\nplt.ylim(0,50)\n\n##### plt.axis((1990,2010,0,50))\n\n# Add a title and display the plot\nplt.title('Degrees awarded to women (1990-2010)\\nComputer Science (red)\\nPhysical Sciences (blue)')\nplt.show()\n# Save the image as 'xlim_and_ylim.png'\nplt.savefig('xlim_and_ylim.png')\n\n########### legend ###########\n# Specify the label 'Computer Science'\nplt.plot(year, computer_science, color='red', label='Computer Science') \n# Specify the label 'Physical Sciences' \nplt.plot(year, physical_sciences, color='blue', label='Physical Sciences')\n\n# Add a legend at the lower center\nplt.legend(loc='lower center')\n\n# Add axis labels and title\nplt.xlabel('Year')\nplt.ylabel('Enrollment (%)')\nplt.title('Undergraduate enrollment of women')\nplt.show()\n########### annotate & style ###########\n# Import matplotlib.pyplot\nimport matplotlib.pyplot as plt\n\n# Set the style to 
'ggplot'\nplt.style.use('ggplot')\n\n# Create a figure with 2x2 subplot layout\nplt.subplot(2, 2, 1) \n\n# Plot the enrollment % of women in the Physical Sciences\nplt.plot(year, physical_sciences, color='blue')\nplt.title('Physical Sciences')\n\n# Plot the enrollment % of women in Computer Science\nplt.subplot(2, 2, 2)\nplt.plot(year, computer_science, color='red')\nplt.title('Computer Science')\n\n# Add annotation\ncs_max = computer_science.max()\nyr_max = year[computer_science.argmax()]\nplt.annotate('Maximum', xy=(yr_max, cs_max), xytext=(yr_max-1, cs_max-10), arrowprops=dict(facecolor='black'))\n\n# Plot the enrollmment % of women in Health professions\nplt.subplot(2, 2, 3)\nplt.plot(year, health, color='green')\nplt.title('Health Professions')\n\n# Plot the enrollment % of women in Education\nplt.subplot(2, 2, 4)\nplt.plot(year, education, color='yellow')\nplt.title('Education')\n\n# Improve spacing between subplots and display them\nplt.tight_layout()\nplt.show()\n\n################################# Seaborn - (on top of matplot) #################################\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n############### linear regression line ###############\n# Plot a linear regression between 'weight' and 'hp'\nsns.lmplot(x='weight', y='hp', data=auto)\nsns.residplot(x='hp', y='mpg', data=auto, color='green')\n\n# Generate a scatter plot of 'weight' and 'mpg' using red circles\nplt.scatter(auto['weight'], auto['mpg'], label='data', color='red', marker='o')\n\n# Plot in blue a linear regression of order 1 between 'weight' and 'mpg', order is used to control the order of polynomial regression\nsns.regplot(x='weight', y='mpg', data=auto, color='blue', label='order 1',scatter=None)\n# Plot in green a linear regression of order 2 between 'weight' and 'mpg'\nsns.regplot(x='weight', y='mpg', data=auto, color='green', label='order 2', order=2,scatter=None)\n# Plot a linear regression between 'weight' and 'hp', with a hue of 'origin' and palette of 'Set1'\nsns.lmplot(x='weight', y='hp', data=auto, hue='origin', palette='Set1')\n# Plot linear regressions between 'weight' and 'hp' grouped row-wise by 'origin'\nsns.lmplot(x='weight', y='hp', data=auto, hue='origin', row='origin') ## generate 3 subplots in rows\n\n# Add a legend and display the plot\nplt.legend(loc='upper right')\nplt.show()\n############### strip plot ###############\n# Make a strip plot of 'hp' grouped by 'cyl'\nplt.subplot(2,1,1)\nsns.stripplot(x='cyl', y='hp', data=auto)\n# Make the strip plot again using jitter and a smaller point size\nplt.subplot(2,1,2)\nsns.stripplot(x='cyl', y='hp', data=auto, size =3, jitter=True)\n\n############### swarm plot ###############\n# Generate a swarm plot of 'hp' grouped horizontally by 'cyl' \nplt.subplot(2,1,1)\nsns.swarmplot(x='cyl', y='hp', data=auto)\n# Generate a swarm plot of 'hp' grouped vertically by 'cyl' with a hue of 'origin'\nplt.subplot(2,1,2)\nsns.swarmplot(x='hp', y='cyl', data=auto, hue='origin', orient = 'h')\n\n############### violinplot ###############\n# Generate a violin plot of 'hp' grouped horizontally by 'cyl'\nplt.subplot(2,1,1)\nsns.violinplot(x='cyl', y='hp', data=auto)\n# Generate the same violin plot again with a color of 'lightgray' and without inner annotations\nplt.subplot(2,1,2)\nsns.violinplot(x='cyl', y='hp', data=auto, inner=None, color='lightgray')\n\n# Overlay a strip plot on the violin plot\nsns.stripplot(x='cyl', y='hp', data=auto, size =1.5, jitter=True)\n############### joint plot ###############\nsns.jointplot(x='hp', y='mpg', 
data=auto)\n#kind='scatter' uses a scatter plot of the data points\n#kind='reg' uses a regression plot (default order 1)\n#kind='resid' uses a residual plot\n#kind='kde' uses a kernel density estimate of the joint distribution\n#kind='hex' uses a hexbin plot of the joint distribution\n############### pair plot & hearmap ###############\nsns.pairplot(auto)\n# Plot the pairwise joint distributions grouped by 'origin' along with regression lines\nsns.pairplot(auto, hue='origin',kind='reg')\n\n# Visualize the covariance matrix using a heatmap\nsns.heatmap(cov_matrix)\n######################## image histogram ########################\n# Load the image into an array: image\nimage = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')\n\n# Display image in top subplot using color map 'gray'\nplt.subplot(2,1,1)\nplt.title('Original image')\nplt.axis('off')\nplt.imshow(image, cmap='gray')\n\n# Flatten the image into 1 dimension: pixels\npixels = image.flatten()\n\n# Display a histogram of the pixels in the bottom subplot\nplt.subplot(2,1,2)\nplt.xlim((0,255))\nplt.title('Normalized histogram')\nplt.hist(pixels, bins=64, range=(0,256), normed=True, color='red', alpha=0.4)\n############\n# Load the image into an array: image\nimage = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')\n\n# Display image in top subplot using color map 'gray'\nplt.subplot(2,1,1)\nplt.imshow(image, cmap='gray')\nplt.title('Original image')\nplt.axis('off')\n\n# Flatten the image into 1 dimension: pixels\npixels = image.flatten()\n\n# Display a histogram of the pixels in the bottom subplot\nplt.subplot(2,1,2)\npdf = plt.hist(pixels, bins=64, range=(0,256), normed=False,\n color='red', alpha=0.4)\nplt.grid('off')\n\n# Use plt.twinx() to overlay the CDF in the bottom subplot\nplt.twinx()\n\n# Display a cumulative histogram of the pixels\ncdf = plt.hist(pixels, bins=64, range=(0,256),\n normed=True, cumulative=True,\n color='blue', alpha=0.4)\n \n# Specify x-axis range, hide axes, add title and display plot\nplt.xlim((0,256))\nplt.grid('off')\nplt.title('PDF & CDF (original image)')\nplt.show()\n##############\n# Load the image into an array: image\nimage = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')\n\n# Flatten the image into 1 dimension: pixels\npixels = image.flatten()\n\n# Generate a cumulative histogram\ncdf, bins, patches = plt.hist(pixels, bins=256, range=(0,256), normed=True, cumulative=True)\nnew_pixels = np.interp(pixels, bins[:-1], cdf*255)\n\n# Reshape new_pixels as a 2-D array: new_image\nnew_image = new_pixels.reshape(image.shape)\n\n# Display the new image with 'gray' color map\nplt.subplot(2,1,1)\nplt.title('Equalized image')\nplt.axis('off')\nplt.imshow(new_image,cmap='gray')\n\n# Generate a histogram of the new pixels\nplt.subplot(2,1,2)\npdf = plt.hist(new_pixels, bins=64, range=(0,256), normed=False,\n color='red', alpha=0.4)\nplt.grid('off')\n\n# Use plt.twinx() to overlay the CDF in the bottom subplot\nplt.twinx()\nplt.xlim((0,256))\nplt.grid('off')\n\n# Add title\nplt.title('PDF & CDF (equalized image)')\n\n# Generate a cumulative histogram of the new pixels\ncdf = plt.hist(new_pixels, bins=64, range=(0,256),\n cumulative=True, normed=True,\n color='blue', alpha=0.4)\n##############\n# Load the image into an array: image\nimage = plt.imread('hs-2004-32-b-small_web.jpg')\n\n# Display image in top subplot\nplt.subplot(2,1,1)\nplt.title('Original image')\nplt.axis('off')\nplt.imshow(image)\n\n# Extract 2-D arrays of the RGB channels: red, blue, green\nred, green, blue = image[:,:,0], image[:,:,1], 
image[:,:,2]\n\n# Flatten the 2-D arrays of the RGB channels into 1-D\nred_pixels = red.flatten()\nblue_pixels = blue.flatten()\ngreen_pixels = green.flatten()\n\n# Overlay histograms of the pixels of each color in the bottom subplot\nplt.subplot(2,1,2)\nplt.title('Histograms from color image')\nplt.xlim((0,256))\nplt.hist(red_pixels, bins=64, normed=True, color='red', alpha=0.2)\nplt.hist(blue_pixels, bins=64, normed=True, color='blue', alpha=0.2)\nplt.hist(green_pixels, bins=64, normed=True, color='green', alpha=0.2)\n##########\n# Load the image into an array: image\nimage = plt.imread('hs-2004-32-b-small_web.jpg')\n\n# Extract RGB channels (index 0=red, 1=green, 2=blue) and flatten into 1-D array\nred, green, blue = image[:,:,0], image[:,:,1], image[:,:,2]\nred_pixels = red.flatten()\nblue_pixels = blue.flatten()\ngreen_pixels = green.flatten()\n\n# Generate a 2-D histogram of the red and green pixels\nplt.subplot(2,2,1)\nplt.grid('off') \nplt.xticks(rotation=60)\nplt.xlabel('red')\nplt.ylabel('green')\nplt.hist2d(red_pixels, green_pixels, bins=(32,32))\n\n######################## time series ########################\n# Import matplotlib.pyplot\nimport matplotlib.pyplot as plt\n\n# Plot the ibm time series in green\nplt.plot(ibm, color='green', label='IBM')\n# Add a legend in the top left corner of the plot\nplt.xticks(rotation=60)\n# Specify the orientation of the xticks\nplt.legend(loc='upper left')\n\n# Plot the series in the top subplot in blue\nplt.subplot(2,1,1)\nplt.xticks(rotation=45)\nplt.title('AAPL: 2001 to 2011')\nplt.plot(aapl, color='blue')\nplt.tight_layout()\nplt.show()\n\n# Specify the axes\nplt.axes((0.25, 0.5,0.35, 0.35))\n\n# Plot the sliced series in red using the current axes\nplt.plot(view, color='red')\nplt.xticks(rotation=45)\nplt.title('2007/11-2008/04')\nplt.show()\n############ plot with moving average line ############\n# Plot the 250-day moving average in the bottom right subplot in cyan\nplt.subplot(2, 2, 4)\nplt.plot(mean_250, 'cyan', label='250d')\nplt.plot(aapl, 'k-.')\nplt.xticks(rotation=60)\nplt.title('250d averages')\n###############################################################################\n###################### meshgrid ######################\n# Import numpy and matplotlib.pyplot\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Generate two 1-D arrays: u, v\nu = np.linspace(-2, 2, 41)\nv = np.linspace(-1, 1, 21)\n# Generate 2-D arrays from u and v: X, Y\nX,Y = np.meshgrid(u, v)\n# Compute Z based on X and Y\nZ = np.sin(3*np.sqrt(X**2 + Y**2)) \n\n# Display the resulting image with pcolor()\nplt.pcolor(Z)\nplt.show()\n########### Contour & filled contour ###########\n# Generate a default contour map of the array Z\nplt.subplot(2,2,1)\nplt.contour(X, Y, Z)\n\n# Generate a contour map with 20 contours\nplt.subplot(2,2,2)\nplt.contour(X, Y, Z, 20)\n\n# Generate a default filled contour map of the array Z\nplt.subplot(2,2,3)\nplt.contourf(X, Y, Z)\n\n# Generate a default filled contour map with 20 contours\nplt.subplot(2,2,4)\nplt.contourf(X, Y, Z, 20)\n\n# Improve the spacing between subplots\nplt.tight_layout()\n########### color map ###########\n# Create a filled contour plot with a color map of 'viridis'\nplt.subplot(2,2,1)\nplt.contourf(X,Y,Z,20, cmap='viridis')\nplt.colorbar()\nplt.title('Viridis')\n# Create a filled contour plot with a color map of 'gray'\nplt.subplot(2,2,2)\nplt.contourf(X,Y,Z,20, cmap='gray')\nplt.colorbar()\nplt.title('Gray')\n# Create a filled contour plot with a color map of 'autumn'\nplt.subplot(2,2,3)\nplt.contourf(X,Y,Z,20, 
cmap='autumn')\nplt.colorbar()\nplt.title('Autumn')\n# Create a filled contour plot with a color map of 'winter'\nplt.subplot(2,2,4)\nplt.contourf(X,Y,Z,20, cmap='winter')\nplt.colorbar()\nplt.title('Winter')\n\n# Improve the spacing between subplots and display them\nplt.tight_layout()\nplt.show()\n\n########### Histogram ###########\n# Generate a 2-D histogram\nplt.hist2d(hp, mpg, bins=(20,20), range=((40,235),(8,48))) # hp is horizontal axis, mpg is vertical axis\n# or plt.hexbin(hp, mpg, gridsize=(15,12),extent=(40,235,8,48))\n# Add a color bar to the histogram\nplt.colorbar()\n\n# Add labels, title, and display the plot\nplt.xlabel('Horse power [hp]')\nplt.ylabel('Miles per gallon [mpg]')\nplt.title('hist2d() plot')\nplt.show()\n\n########### images ###########\n# Load the image into an array: img\nimg = plt.imread('480px-Astronaut-EVA.jpg')\n\n# Print the shape of the image\nprint(img.shape)\n\n# Compute the sum of the red, green and blue channels: intensity\nintensity = img.sum(axis=2)\n\n# Print the shape of the intensity\nprint(intensity.shape)\n\n# Display the intensity with a colormap of 'gray'\nplt.imshow(intensity, cmap='gray')\n\n# Add a colorbar\nplt.colorbar()\n\n# Hide the axes and show the figure\nplt.axis('off')\nplt.show()\n\n# Specify the extent and aspect ratio of the top left subplot\nplt.subplot(2,2,1)\nplt.title('extent=(-1,1,-1,1),\\naspect=0.5') \nplt.xticks([-1,0,1])\nplt.yticks([-1,0,1])\nplt.imshow(img, extent=(-1,1,-1,1), aspect=0.5)\n\n# Specify the extent and aspect ratio of the top right subplot\nplt.subplot(2,2,2)\nplt.title('extent=(-1,1,-1,1),\\naspect=1')\nplt.xticks([-1,0,1])\nplt.yticks([-1,0,1])\nplt.imshow(img, extent=(-1,1,-1,1), aspect=1)\n\n# Specify the extent and aspect ratio of the bottom left subplot\nplt.subplot(2,2,3)\nplt.title('extent=(-1,1,-1,1),\\naspect=2')\nplt.xticks([-1,0,1])\nplt.yticks([-1,0,1])\nplt.imshow(img, extent=(-1,1,-1,1), aspect=2)\n\n# Specify the extent and aspect ratio of the bottom right subplot\nplt.subplot(2,2,4)\nplt.title('extent=(-2,2,-1,1),\\naspect=2')\nplt.xticks([-2,-1,0,1,2])\nplt.yticks([-1,0,1])\nplt.imshow(img, extent=(-2,2,-1,1), aspect=2)\n\n# Improve spacing and display the figure\nplt.tight_layout()\nplt.show()\n\n# Load the image into an array: image\nimage = plt.imread('640px-Unequalized_Hawkes_Bay_NZ.jpg')\n\n# Extract minimum and maximum values from the image: pmin, pmax\npmin, pmax = image.min(), image.max()\nprint(\"The smallest & largest pixel intensities are %d & %d.\" % (pmin, pmax))\n\n# Rescale the pixels: rescaled_image\nrescaled_image = 256*(image-pmin)/(pmax-pmin)\nprint(\"The rescaled smallest & largest pixel intensities are %.1f & %.1f.\" % \n      (rescaled_image.min(), rescaled_image.max()))\n\n# Display the original image in the top subplot\nplt.subplot(2,1,1)\nplt.title('original image')\nplt.axis('off')\nplt.imshow(image)\n\n# Display the rescaled image in the bottom subplot\nplt.subplot(2,1,2)\nplt.title('rescaled image')\nplt.axis('off')\nplt.imshow(rescaled_image)\n########### ###########\n########### ###########\n","sub_path":"Learn_Py_Visualization.py","file_name":"Learn_Py_Visualization.py","file_ext":"py","file_size_in_byte":16606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"66822940","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport math\ndef quadratic(a,b,c):\n    delta = b**2 - 4*a*c\n    sq = math.sqrt(delta)\n    # parenthesize the denominator so we divide by 2*a instead of multiplying by a\n    x1 = (-b+sq)/(2*a)\n    x2 = (-b-sq)/(2*a)\n    return 
x1,x2\n\nprint(quadratic(1,2,1))\n\n","sub_path":"quadratic.py","file_name":"quadratic.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"358259312","text":"#!/usr/bin/python3\n\nfrom util import *\n\ndef check(n):\n \"\"\"Return a pandigital formed by this number or zero.\n\n >>> check(192)\n 192384576\n \"\"\"\n\n out, curr = 0, 0\n for i in range(10):\n curr *= 10**len(str(n*i))\n curr += n*i\n if ispandigital(curr):\n out = curr\n return out\n\nimport doctest; doctest.testmod()\n\nbig = 0\nfor i in range(100000):\n c = check(i)\n if c > big:\n big = c\n print(big)\n","sub_path":"py/38.py","file_name":"38.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"37738439","text":"#!/home/carnd/anaconda3/envs/torch/bin/python\n\n# One upside for calling this as shell script rather than as 'python x.py' is that\n# you can see the script name in top/ps - useful when you have a bunch of python processes\n\ntry:\n import generative_playground\nexcept:\n import sys, os, inspect\n my_location = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n sys.path.append('../../..')\n sys.path.append('../../../../../transformer_pytorch')\n\nfrom generative_playground.molecules.train.vae.main_train_vae import train_vae\nfrom generative_playground.molecules.model_settings import get_settings\n\nmolecules = True\ngrammar = True\nsettings = get_settings(molecules,grammar)\n\nsave_file =settings['filename_stub'] + 'baseline__.h5'\nmodel, fitter, _ = train_vae(molecules=molecules,\n grammar=grammar,\n BATCH_SIZE=50, # max 500 on a p2.xlarge\n save_file=save_file,\n sample_z=True,\n encoder_type='cnn',\n decoder_type='step',\n lr=5e-4,\n plot_prefix='baseline lr 5e-4 KLW 0.01',\n reg_weight= 1,\n epsilon_std = 0.01,\n dashboard='main',\n preload_weights=False)\n\nwhile True:\n next(fitter)\n\n","sub_path":"src/generative_playground/molecules/train/vae/train_baseline.py","file_name":"train_baseline.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"124763581","text":"\"\"\"\nModule for validating and converting query string input to the WMS 1.3.0\nservice.\n\"\"\"\n\n# Standard library imports\nimport calendar\nfrom datetime import datetime\nfrom email.utils import parsedate\nimport json\nfrom functools import reduce\nfrom io import BytesIO\nimport operator\nimport pickle\nimport re\nimport time\nfrom urllib.parse import urlparse, unquote\n\n# External imports\nimport numpy as np\nfrom paste.request import construct_url, parse_dict_querystring\nfrom paste.util.converters import asbool\nimport pyproj\nfrom webob.exc import HTTPBadRequest, HTTPNotModified\n\n# Pydap imports\nfrom pydap.model import *\nfrom pydap.lib import walk\n\n# Internal imports\nfrom . import projutils\nfrom . 
import gridutils\n\nWMS_VERSION = '1.3.0'\nWMS_ARGUMENTS = ['request', 'bbox', 'cmap', 'layers', 'width', 'height', \n 'transparent', 'time', 'level', 'elevation', 'styles',\n 'service', 'version', 'format', 'crs', 'bounds',\n 'exceptions', 'bgcolor', 'expr', 'items', 'size']\nMIN_WIDTH=0\nMAX_WIDTH=8192\nMIN_HEIGHT=0\nMAX_HEIGHT=8192\n\nDEFAULT_CRS = 'EPSG:4326'\n\nSUPPORTED_CRS = ['EPSG:4326', 'EPSG:3857'] #, 'EPSG:3413', 'EPSG:3031']\nSUPPORTED_REQUESTS = ['GetMap', 'GetCapabilities', 'GetMetadata', 'GetColorbar']\nSUPPORTED_FORMATS = ['image/png']\nSUPPORTED_EXCEPTIONS = ['XML']\n\nEXCEPTION_TEMPLATE=\"\"\"\n\n \n \n \n ${message} ${error_code}\n \n \n \n \n ${message}\n \n \n \n\"\"\"\n\nALLOWED_EXCEPTIONS = ['InvalidFormat', 'InvalidCRS', 'LayerNotDefined',\n 'StyleNotDefined', 'MissingDimensionValue',\n 'InvalidDimensionValue', 'OperationNotSupported']\n\nclass WMSException(Exception):\n def __init__(self, message, status_code, error_code=None):\n # Call the base class constructor with the parameters it needs\n super(WMSException, self).__init__(message)\n self.status_code = status_code\n self.error_code = error_code\n\ndef parse_styles(styles_str, layers):\n \"\"\"Parse styles string into list of dicts.\"\"\"\n if len(styles_str) > 0:\n styles = styles_str.split(',')\n else:\n styles = len(layers)*['']\n\n styles_parsed = []\n for style in styles: \n if len(style) > 0: \n style_items_parsed = {}\n style_items = style.split(';') \n for style_item in style_items: \n key, value = style_item.split('=') \n style_items_parsed[key] = value\n styles_parsed.append(style_items_parsed)\n else:\n styles_parsed.append({})\n return styles_parsed\n\ndef validate_wms(environ):\n \"\"\"\\\n Common validation for WMS calls.\n \"\"\"\n query = parse_dict_querystring_lower(environ)\n\n # Check that REQUEST is present\n if 'request' not in query:\n msg = 'REQUEST not present in query string'\n raise WMSException(msg, 400)\n\n # Get REQUEST\n type_ = query.get('request')\n\n # Check that REQUEST is valid\n if type_ not in SUPPORTED_REQUESTS:\n msg = 'REQUEST=%s not supported; valid values=%s' % \\\n (type_, SUPPORTED_REQUESTS)\n if type_ == 'GetFeatureInfo':\n error_code = 'OperationNotSupported'\n raise WMSException(msg, 400, error_code)\n else:\n raise WMSException(msg, 400)\n\n query_valid = {'request': type_}\n\n return query_valid\n\ndef validate_get_capabilities(environ):\n \"\"\"\\\n Validate GetCapabilities call.\n \"\"\"\n # Input query string parameters\n query = parse_dict_querystring_lower(environ)\n\n required_args = ['service', 'request']\n for arg in required_args:\n if arg not in query:\n msg = '%s not present in query string' % arg.upper()\n raise WMSException(msg, 400)\n\n # Check that SERVICE=WMS\n service = query.get('service')\n if service != 'WMS':\n msg = 'SERVICE=WMS not present in query string'\n raise WMSException(msg, 400)\n\n # Validated output\n query_valid = {'service': service}\n\n return query_valid\n\n\ndef validate_get_map(environ, dataset, dataset_styles):\n \"\"\"\\\n Validate GetMap call.\n \"\"\"\n # Validated output\n query_valid = {}\n\n query = parse_dict_querystring_lower(environ)\n\n required_args = ['version', 'request', 'layers', 'styles', 'crs', 'bbox',\n 'width', 'height', 'format']\n for arg in required_args:\n if arg not in query:\n msg = '%s not present in query string' % arg.upper()\n raise WMSException(msg, 400)\n\n # Check that VERSION is VERSION=1.3.0\n version = query.get('version')\n if version != WMS_VERSION:\n msg = 'VERSION=%s not supported; 
valid value=%s' % \\\n (version, WMS_VERSION)\n raise WMSException(msg, 400)\n query_valid['version'] = version\n\n # Check that FORMAT is supported\n format_ = query.get('format')\n if format_ not in SUPPORTED_FORMATS:\n msg = 'FORMAT=%s not supported; valid values=%s' % \\\n (format_, SUPPORTED_FORMATS)\n error_code = 'InvalidFormat'\n raise WMSException(msg, 400, error_code)\n query_valid['format'] = format_\n\n # Check WIDTH\n w = query.get('width')\n try:\n w = int(w)\n except ValueError:\n msg = 'WIDTH=%s not an integer' % w\n raise WMSException(msg, 400)\n \n if w < MIN_WIDTH or w > MAX_WIDTH:\n msg = 'WIDTH=%s must be in range [%i; %i]' % \\\n (w, MIN_WIDTH, MAX_WIDTH)\n raise WMSException(msg, 400)\n query_valid['width'] = w\n\n # Check HEIGHT\n h = query.get('height')\n try:\n h = int(h)\n except ValueError:\n msg = 'HEIGHT=%s not an integer' % h\n raise WMSException(msg, 400)\n if h < MIN_HEIGHT or h > MAX_HEIGHT:\n msg = 'HEIGHT=%s must be in range [%i; %i]' % \\\n (h, MIN_HEIGHT, MAX_HEIGHT)\n raise WMSException(msg, 400)\n query_valid['height'] = h\n\n # Check CRS\n crs = query.get('crs')\n if crs not in SUPPORTED_CRS:\n msg = 'CRS=%s not supported; valid values=%s' % \\\n (crs, SUPPORTED_CRS)\n error_code = 'InvalidCRS'\n raise WMSException(msg, 400, error_code)\n query_valid['crs'] = crs\n\n # Check BBOX\n bbox_str = query.get('bbox')\n bbox = bbox_str.split(',')\n if len(bbox) != 4:\n msg = 'BBOX=%s must contain 4 comma separated values' % \\\n (bbox_str)\n raise WMSException(msg, 400)\n \n try:\n bbox = [float(v) for v in bbox]\n except ValueError:\n msg = 'BBOX=%s does not contain 4 numeric values' % \\\n (bbox_str)\n raise WMSException(msg, 400)\n\n # Reorder bounding box for EPSG:4326 which has lat/lon ordering\n if crs == 'EPSG:4326':\n bbox = [bbox[1], bbox[0], bbox[3], bbox[2]]\n\n query_valid['bbox'] = bbox\n\n\n # Check LAYERS and STYLES (basic check)\n layers_str = query.get('layers')\n layers = layers_str.split(',')\n styles_str = unquote(query.get('styles'))\n styles = parse_styles(styles_str, layers)\n\n if len(layers) != len(styles):\n msg = 'LAYERS=%s and STYLES=%s do not have same length' % \\\n (layers_str, styles_str)\n raise WMSException(msg, 400)\n\n # Check if layers and styles are defined\n # TODO: We should allow non-defined styles\n \"\"\"\n defined_layers = build_layers(dataset, dataset_styles)\n layer_names = [l['name'] for l in defined_layers]\n defined_styles = {l['name']: l['styles'] for l in \\\n defined_layers}\n for layer, style in zip(layers, styles):\n if layer not in layer_names:\n msg = '%s in LAYERS=%s not defined; valid values=%s' \\\n % (layer, layers_str, layer_names)\n error_code = 'LayerNotDefined'\n raise WMSException(msg, 400, error_code)\n defined_styles_layer = defined_styles[layer]\n if style != '' and style not in defined_styles_layer:\n msg = '%s in STYLE=%s not defined; valid values=%s' \\\n % (style, styles_str, defined_styles_layer)\n error_code = 'StylesNotDefined'\n raise WMSException(msg, 400, error_code)\n \"\"\"\n\n query_valid['layers'] = layers\n query_valid['styles'] = styles\n\n # Check TRANSPARENT\n transparent = query.get('transparent', 'FALSE')\n if transparent not in ['TRUE', 'FALSE']:\n msg = 'TRANSPARENT=%s must be TRUE or FALSE' % \\\n (transparent)\n raise WMSException(msg, 400)\n # Convert to bool\n transparent = asbool(transparent)\n query_valid['transparent'] = transparent\n\n # Check BGCOLOR (TODO: add check)\n bgcolor = query.get('bgcolor', '0xFFFFFF')\n query_valid['bgcolor'] = bgcolor\n\n # Check 
EXCEPTIONS\n exceptions = query.get('exceptions', 'XML')\n if exceptions not in SUPPORTED_EXCEPTIONS:\n msg = 'EXCEPTIONS=%s not supported; valid values=%s' % \\\n (exceptions, SUPPORTED_EXCEPTIONS)\n raise WMSException(msg, 400)\n query_valid['exceptions'] = exceptions\n\n # Check TIME (TODO: add check)\n time = query.get('time', None)\n # Time; if time is None we will use the nearest timestep available\n if time == 'current': time = None\n query_valid['time'] = time\n\n # Check ELEVATION (TODO: add check)\n elevation = query.get('elevation', None)\n if elevation is not None:\n try:\n elevation = float(elevation)\n except ValueError:\n msg = 'ELEVATION=%s not a float' % elevation\n raise WMSException(msg, 400)\n query_valid['elevation'] = elevation\n\n return query_valid\n\n\ndef validate_get_colorbar(environ):\n \"\"\"\\\n Validate GetColorbar call.\n \"\"\"\n query = parse_dict_querystring_lower(environ)\n\n required_args = []\n for arg in required_args:\n if arg not in query:\n msg = '%s not present in query string' % arg.upper()\n raise WMSException(msg, 400)\n\n # Check CMAP\n # TODO: check whether it is valid\n cmapname = query.get('cmap', None)\n\n # Check SIZE\n size = int(query.get('size', 200))\n min_size = max(MIN_WIDTH, MIN_HEIGHT)\n max_size = min(MAX_WIDTH, MAX_HEIGHT)\n if size < min_size or size > max_size:\n msg = 'SIZE=%s must be in range [%i; %i]' % \\\n (size, min_size, max_size)\n raise WMSException(msg, 400)\n\n # Get more query string parameters\n styles = query.get('styles', 'vertical').split(',')\n if 'horizontal' in styles:\n orientation = 'horizontal'\n else:\n orientation = 'vertical'\n if 'noticks' in styles:\n add_ticks = False\n else:\n add_ticks = True\n if 'centerlabels' in styles:\n center_labels = True\n else:\n center_labels = False\n\n # TODO: return real values\n query_valid = {}\n return query_valid\n\ndef validate(environ, dataset, styles):\n \"\"\"\\\n Validates and converts query string given WSGI environ input.\n Returns a dict containing the converted parameters. 
Throws\n a WMSException if a validation error is encountered.\n \"\"\"\n\n query = parse_dict_querystring_lower(environ)\n\n query_valid_master = validate_wms(environ)\n\n # Get REQUEST\n type_ = query.get('request')\n\n if type_ == 'GetCapabilities':\n query_valid = validate_get_capabilities(environ)\n elif type_ == 'GetMap':\n query_valid = validate_get_map(environ, dataset, styles)\n elif type_ == 'GetColorbar':\n query_valid = validate_get_colorbar(environ)\n elif type_ == 'GetMetadata':\n # TODO: Move GetMetadata validation here\n query_valid = {}\n else:\n msg = 'Internal Error'\n raise WMSException(msg, 500)\n\n query_valid_master.update(query_valid)\n return query_valid_master\n\n\ndef parse_dict_querystring_lower(environ):\n \"\"\"Parses query string into dict with keys in lower case.\"\"\"\n query = parse_dict_querystring(environ)\n # Convert WMS argument keys to lower case\n lowerlist = []\n for k,v in query.items():\n if k.lower() in WMS_ARGUMENTS:\n lowerlist.append(k)\n for k in lowerlist:\n v = query.pop(k)\n query[k.lower()] = v\n return query\n\ndef build_layers(dataset, supported_styles):\n grids = [grid for grid in walk(dataset, GridType) if \\\n gridutils.is_valid(grid, dataset)]\n # Store information for regular layers\n layers = []\n for grid in grids:\n # Style information\n standard_name = grid.attributes.get('standard_name', None)\n styles = []\n if standard_name is not None:\n styles = supported_styles.get(standard_name, [])\n\n # Spatial information\n lon = np.asarray(gridutils.get_lon(grid, dataset)[:])\n lat = np.asarray(gridutils.get_lat(grid, dataset)[:])\n minx, maxx = np.min(lon), np.max(lon)\n miny, maxy = np.min(lat), np.max(lat)\n bbox = [minx, miny, maxx, maxy]\n\n # Vertical dimension\n z = gridutils.get_vertical(grid)\n dims = grid.dimensions\n if z is not None:\n if z.name not in dims:\n z = None\n\n # Time information\n time = gridutils.get_time(grid)\n\n layer = {\n 'name': grid.name,\n 'title': grid.attributes.get('long_name', grid.name),\n 'abstract': grid.attributes.get('history', ''),\n 'styles': styles,\n 'bounding_box': bbox,\n 'vertical': z,\n 'time': time\n }\n layers.append(layer)\n \n # Find and store information for vector layers\n for u_grid in grids:\n u_standard_name = u_grid.attributes.get('standard_name', '')\n if u_standard_name.startswith('eastward_'):\n postfix = u_standard_name.split('_', 1)[1]\n standard_name = 'northward_' + postfix\n for v_grid in grids:\n v_standard_name = v_grid.attributes.get(\n 'standard_name', '')\n if standard_name == v_standard_name:\n styles = supported_styles.get(postfix, [])\n\n # Spatial information\n lon = gridutils.get_lon(u_grid, dataset)\n lat = gridutils.get_lat(u_grid, dataset)\n minx, maxx = np.min(lon), np.max(lon)\n miny, maxy = np.min(lat), np.max(lat)\n bbox = [minx, miny, maxx, maxy]\n\n # Vertical dimension\n z = gridutils.get_vertical(u_grid)\n dims = u_grid.dimensions\n if z is not None:\n if z.name not in dims:\n z = None\n\n # Time information\n time = gridutils.get_time(u_grid)\n\n layer = {\n 'name': ':'.join([u_grid.name, v_grid.name]),\n 'title': postfix,\n 'abstract': grid.attributes.get('history', ''),\n 'styles': styles,\n 'bounding_box': bbox,\n 'vertical': z,\n 'time': time\n }\n layers.append(layer)\n\n return layers\n","sub_path":"src/pydap/responses/wms/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":15736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"237841046","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"Este módulo contiene al bot y lo ejecuta si se corre el script.\"\"\"\n\nimport platform\nimport sqlite3\nimport sys\nimport random\nimport re\nimport yaml\nimport logger\nimport discord\nfrom models import db, Post, Ban, Redditor, Meme\nfrom tasks import posts_loop\n\n__author__ = 'Nicolás Santisteban, Jonathan Gutiérrez'\n__license__ = 'MIT'\n__version__ = '0.1.4'\n__status__ = \"Producción\"\n\n\nclass Alexis(discord.Client):\n \"\"\"Contiene al bot e inicializa su funcionamiento.\"\"\"\n def __init__(self, **options):\n super().__init__(**options)\n\n self.log = logger.get_logger('Alexis')\n\n db.connect()\n db.create_tables([Post, Ban, Redditor, Meme], True)\n\n try:\n with open('config.yml', 'r') as file:\n self.config = yaml.safe_load(file)\n except Exception as ex:\n self.log.exception(ex)\n raise\n\n def init(self):\n \"\"\"Inicializa al bot\"\"\"\n self.log.info('\"Alexis Bot\" versión %s de %s.', __version__, __status__)\n self.log.info('Python %s en %s.', sys.version, sys.platform)\n self.log.info(platform.uname())\n self.log.info('Soporte SQLite3 para versión %s.', sqlite3.sqlite_version)\n self.log.info('------')\n\n if 'default_memes' in self.config and len(self.config['default_memes']) > 0:\n self.log.info('Inicializando base de datos...')\n for meme_name, meme_cont in self.config['default_memes'].items():\n Meme.get_or_create(name=meme_name, content=meme_cont)\n\n self.log.info('Conectando...')\n\n try:\n self.loop.create_task(posts_loop(self))\n self.run(self.config['token'])\n except Exception as ex:\n self.log.exception(ex)\n raise\n\n async def on_ready(self):\n \"\"\"Esto se ejecuta cuando el bot está conectado y listo\"\"\"\n self.log.info('Conectado como:')\n self.log.info(self.user.name)\n self.log.info(self.user.id)\n self.log.info('------')\n await self.change_presence(game=discord.Game(name=self.config['playing']))\n\n async def on_message(self, message):\n \"\"\"Método ejecutado cada vez que se recibe un mensaje\"\"\"\n text = message.content\n author = message.author.name\n chan = message.channel\n is_pm = message.server is None\n is_owner = 'owners' in self.config and message.author.id in self.config['owners']\n\n # !ping\n #if text == '!ping':\n # await self.send_message(chan, 'pong!')\n\n # !version\n if text == '!version' or text == '!info':\n info_msg = \"```\\nAutores: {}\\n\\nVersión: {}\\n\\nEstado: {}```\"\n await self.send_message(chan, info_msg.format(__author__, __version__, __status__))\n\n # !callate\n elif text == '!callate':\n await self.send_message(chan, 'http://i.imgur.com/nZ72crJ.jpg')\n\n # !choose\n elif text.startswith('!choose '):\n options = text[8:].split(\"|\")\n if len(options) < 2:\n return\n\n # Validar que no hayan opciones vacías\n for option in options:\n if option.strip() == '':\n return\n\n answer = random.choice(options).strip()\n text = 'Yo elijo **{}**'.format(answer)\n await self.send_message(chan, text)\n\n # !f\n elif text.startswith('!f'):\n if text.strip() == '!f':\n text = \"**{}** ha pedido respetos :hearts:\".format(author)\n await self.send_message(chan, text)\n elif text.startswith('!f ') and len(text) >= 4:\n respects = text[3:]\n text = \"**{}** ha pedido respetos por **{}** :hearts:\".format(author, respects)\n await self.send_message(chan, text)\n\n # !ban (no PM)\n elif text.startswith('!ban '):\n if is_pm:\n await self.send_message(chan, 'me estai weando?')\n return\n\n for mention in message.mentions:\n if 'owners' in 
self.config and mention.id in self.config['owners']:\n text = 'nopo wn no hagai esa wea'\n await self.send_message(chan, text)\n elif random.randint(0, 1):\n user, _ = Ban.get_or_create(user=mention, server=message.server)\n update = Ban.update(bans=Ban.bans + 1)\n update = update.where(Ban.user == mention, Ban.server == message.server)\n update.execute()\n\n if user.bans + 1 == 1:\n text = 'Uff, ¡**{}** se fue baneado por primera vez!'.format(mention.name)\n else:\n text = '¡**{}** se fue baneado otra vez y registra **{} baneos**!'\n text = text.format(mention.name, user.bans + 1)\n await self.send_message(chan, text)\n else:\n text = '¡**{}** se salvo del ban de milagro!'.format(mention.name)\n await self.send_message(chan, text)\n\n # !resetban\n elif text.startswith(\"!resetban \"):\n if not is_owner:\n await self.send_message(chan, 'USUARIO NO AUTORIZADO, ACCESO DENEGADO')\n return\n\n if len(text.split(' ')) > 2 or len(message.mentions) < 1:\n await self.send_message(chan, 'Formato: !resetban ')\n return\n\n mention = message.mentions[0]\n user, _ = Ban.get_or_create(user=mention, server=message.server)\n user.bans = 0\n user.save()\n\n await self.send_message(chan, 'Bans reiniciados xd')\n\n # !redditor\n elif text.startswith('!redditor '):\n user = text[10:].split(' ')[0].lower().strip()\n\n if user.startswith('/u/'):\n user = user[3:]\n if not re.match('^[a-zA-Z0-9_-]*$', user):\n return\n\n redditor, _ = Redditor.get_or_create(name=user)\n\n if redditor.posts > 0:\n suffix = 'post' if redditor.posts == 1 else 'posts'\n text = '**/u/{name}** ha creado **{num}** {suffix}.'\n text = text.format(name=user, num=redditor.posts, suffix=suffix)\n await self.send_message(chan, text)\n else:\n text = '**/u/{name}** no ha creado ningún post.'\n text = text.format(name=user)\n await self.send_message(chan, text)\n\n # ! | ¡\n elif text.startswith('! ') or text.startswith('¡'):\n meme_query = ''\n if text.startswith('! 
'):\n meme_query = text[2:]\n else:\n meme_query = text[1:]\n\n try:\n meme = Meme.get(Meme.name == meme_query)\n await self.send_message(chan, meme.content)\n except Meme.DoesNotExist:\n pass\n\n elif text.startswith('!set '):\n meme_query = text[5:].strip().split(' ')\n\n if not is_owner:\n await self.send_message(chan, 'USUARIO NO AUTORIZADO, ACCESO DENEGADO')\n return\n\n if len(meme_query) < 2:\n await self.send_message(chan, 'Formato: !set ')\n return\n\n meme_name = meme_query[0].strip()\n meme_cont = ' '.join(meme_query[1:]).strip()\n meme, created = Meme.get_or_create(name=meme_name)\n meme.content = meme_cont\n meme.save()\n\n if created:\n msg = 'Valor **{name}** creado'.format(name=meme_name)\n self.log.info('Meme %s creado con valor: \"%s\"', meme_name, meme_cont)\n else:\n msg = 'Valor **{name}** actualizado'.format(name=meme_name)\n self.log.info('Meme %s actualizado a: \"%s\"', meme_name, meme_cont)\n\n await self.send_message(chan, msg)\n\n elif text.startswith('!unset '):\n meme_name = text[7:].strip()\n\n if not is_owner:\n await self.send_message(chan, 'USUARIO NO AUTORIZADO, ACCESO DENEGADO')\n return\n\n if meme_name == \"\":\n await self.send_message(chan, 'Formato: !unset ')\n return\n\n try:\n meme = Meme.get(name=meme_name)\n meme.delete_instance()\n msg = 'Valor **{name}** eliminado'.format(name=meme_name)\n await self.send_message(chan, msg)\n self.log.info('Meme %s eliminado', meme_name)\n except Meme.DoesNotExist:\n msg = 'El valor con nombre {name} no existe'.format(name=meme_name)\n await self.send_message(chan, msg)\n \n elif text == '!list':\n if not is_owner:\n await self.send_message(chan, 'USUARIO NO AUTORIZADO, ACCESO DENEGADO')\n return\n \n namelist = []\n for item in Meme.select().iterator():\n namelist.append(item.name)\n\n word = 'valor' if len(namelist) == 1 else 'valores'\n resp = 'Hay {} {}: {}'.format(len(namelist), word, ', '.join(namelist))\n await self.send_message(chan, resp)\n\nif __name__ == '__main__':\n Alexis().init()\n","sub_path":"alexis.py","file_name":"alexis.py","file_ext":"py","file_size_in_byte":9427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"529658600","text":"import json\r\nimport os\r\nimport boto3\r\n\r\nDYNAMODB_TABLE = os.environ['DYNAMODB_TABLE']\r\ndynamodb = boto3.resource('dynamodb')\r\n\r\ndef lambda_handler(event, context):\r\n table = dynamodb.Table(DYNAMODB_TABLE)\r\n \r\n body = json.loads(event['body'])\r\n \r\n item = table.get_item(\r\n Key = {\r\n 'user_id' : body['user_id']\r\n }\r\n )\r\n \r\n favs = item['Item'][\"favorites\"]\r\n favs.remove(body['book_id'])\r\n \r\n response = table.update_item(\r\n Key = {\r\n 'user_id' : body['user_id']\r\n },\r\n UpdateExpression=\"set favorites=:f\",\r\n ExpressionAttributeValues={\r\n ':f': favs\r\n },\r\n ReturnValues=\"UPDATED_NEW\"\r\n )\r\n \r\n return {\r\n 'statusCode': 200,\r\n 'headers': {\r\n 'Access-Control-Allow-Headers': 'Content-Type',\r\n 'Access-Control-Allow-Origin': '*',\r\n 'Access-Control-Allow-Methods': 'OPTIONS,POST,GET, DELETE'\r\n },\r\n }\r\n ","sub_path":"lambda/deleteFavorites.py","file_name":"deleteFavorites.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"143855251","text":"from garm.script_init import *\nfrom sites.models import Catalog_el\nimport json\n\npath = '.'+'/'.join((__file__.split('/'))[:-1]) + '/' + 'sites/pages/elevel/api/v1/products.json'\n\napp.d(json.dumps([\" 1 
\"]))\n\nfile = open(path, 'w')\n\nfirst = True\nfile.write('[')\n\nidx = 5\n\nfor p in Catalog_el.all().load('brand').load('parent'):\n if first:\n first = False\n else:\n file.write(',')\n\n js = json.dumps({\n 'Id': p.id,\n # 'ES_ID': p.Product_id_ES,\n 'Active': 1 if p.active and (not p.parent or p.parent.tree_active) else 0,\n 'Name': p.name,\n 'Marking' :p.marking,\n 'Multiplicity' :p.buy_ratio,\n 'Break' : 0 if p.measure_ratio>1 else 1,\n 'Producer': p.brand.name if p.brand else ''\n })\n # app.d(js)\n file.write(js)\n# file.write()\n#\n app.d(p.id,32)\n # idx-=1\n # if not idx: break\n\nfile.write(']')\n\nfile.close()\n","sub_path":"v1_export.py","file_name":"v1_export.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"257549705","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Author: Lucas\n# Date: 2018-08-26 18:06:39\n\"\"\"\nInsertion Sort\n\"\"\"\n\n\ndef insertion_sort(a):\n for i in range(1, len(a)):\n j = i\n while j > 0 and a[j-1] > a[j]:\n a[j], a[j-1] = a[j-1], a[j]\n j -= 1\n return a\n\n\ndef smoke_test():\n import random\n a = [random.randint(0, 10000) for _ in range(1000)]\n assert sorted(a) == insertion_sort(a)\n\n\nif __name__ == '__main__':\n smoke_test()\n","sub_path":"src_py/alg_2_2.py","file_name":"alg_2_2.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"285505976","text":"4# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport numpy as np\n\nmypwd='/net/home/talavera/radial-inversion/03s00/09s02'\nn = int(open('%s/matrix.dat'%mypwd).readline().rstrip())\nmatrix=np.loadtxt('%s/matrix.dat'%mypwd, skiprows=1).view(dtype=np.complex128).reshape((n,-1))\n\nrr=np.loadtxt('%s/rr.dat'%mypwd).view(dtype=np.complex128).reshape((n,-1))\nrr1=np.loadtxt('%s/rr1.dat'%mypwd).view(dtype=np.complex128).reshape((n,-1))\nrr1_test=np.conjugate(rr).T\n\nI = np.dot(rr1,rr)\nI_test = np.dot(rr1_test,rr)\ntrace = np.trace(I_test) - np.trace(I)","sub_path":"frospy/tests/todo/Su/get_rr_ww.py","file_name":"get_rr_ww.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"391959896","text":"def run():\n\t#take input\n\tsize = int(input())\n\tinput_list = []\n\tfor i in range(size):\n\t\tinput_list.append(int(input()))\n\t#input completed\n\tif len(input_list) > 0:\n\t\tprint(LISLengthDPWithBinarySearch(input_list))\n\n\n\n\ndef LISLengthBruteForce():\n\ttest_cases = int(input())\n\tinput_list = []\n\tfor t in range(test_cases):\n\t\tinput_list.append(int(input()))\n\n\tall_length = []\n\tfor number in input_list:\n\t\tlast = number\n\t\tsub = [last]\n\t\tfor n in input_list[input_list.index(last):]:\n\t\t\tif n > last:\n\t\t\t\tsub.append(n)\n\t\t\t\tlast = n\n\t\tall_length.append(len(sub))\n\treturn max(all_length)\n\n\ndef LISLengthRecursive(sequence, previous, current):\n\t\"\"\" Computes the length of the longest increasing subsequence\n\t time complexity: O(2^n)\n\t space complexity: O(1)\n\n\t >>> LISLengthRecursive([4,3,1,5,2,6,9,12,8,15], 0, 0)\n\t 6\n\t >>> LISLengthRecursive([10,9,2,5,3,7,101,18], 0, 0)\n\t 4\n\t >>> LISLengthRecursive([2,7,4,3,8], 0, 0)\n\t 3\n\t\"\"\"\n\tif current == len(sequence):\n\t\treturn 0;\n\n\ttaken, not_taken = 0, 0\n\tif sequence[current] > previous:\n\t\ttaken = 1 + LISLengthRecursive(sequence, sequence[current], 
current+1)\n\tnot_taken = LISLengthRecursive(sequence, previous, current+1)\n\n\treturn max(taken, not_taken)\n\ndef LISLengthDP(sequence):\n\t\"\"\" computes the length of the longest increasing subsequence\n\t time complexity: O(n^2)\n\t space complexity: O(n)\n\n\t >>> LISLengthDP([4,3,1,5,2,6,9,12,8,15])\n\t 6\n\t >>> LISLengthDP([10,9,2,5,3,7,101,18])\n\t 4\n\t >>> LISLengthDP([2,7,4,3,8])\n\t 3\n\t\"\"\"\n\tdp = [1] * len(sequence)\n\tj = 1\n\twhile j < len(sequence):\n\t\ti = 0\n\t\twhile i < j:\n\t\t\tif sequence[j] > sequence[i]:\n\t\t\t\tif dp[j] < dp[i] + 1:\n\t\t\t\t\tdp[j] = dp[i] + 1\n\t\t\ti = i + 1\n\t\tj = j + 1\n\treturn max(dp)\n\ndef binary_index(sequence, dp, end, key):\n\tstart = 0\n\tlength = end\n\twhile start <= end:\n\t\tmiddle = int((start + end) / 2)\n\t\tif middle < length and sequence[dp[middle]] < key and key <= sequence[dp[middle+1]]:\n\t\t\treturn middle+1\n\t\telif key > sequence[dp[middle]]:\n\t\t\tstart = middle + 1\n\t\telse:\n\t\t\tend = middle - 1\n\n\treturn end\n\ndef LISLengthDPWithBinarySearch(sequence):\n\t\"\"\"computes the length of the longest increasing subsequence\n\t time complexity: O(n log n)\n\t space complexity: O(n)\n\n\t >>> LISLengthDPWithBinarySearch([4,3,1,5,2,6,9,12,8,15])\n\t 6\n\t >>> LISLengthDPWithBinarySearch([10,9,2,5,3,7,101,18])\n\t 4\n\t >>> LISLengthDPWithBinarySearch([2,7,4,3,8])\n\t 3\n\t >>> LISLengthDPWithBinarySearch([2,4,-1,5,8,1,9,10])\n\t 6\n\n\t\"\"\"\n\tdp = [0]\n\tmax_length = 0\n\ti = 1\n\twhile i < len(sequence):\n\t\tif sequence[i] < sequence[dp[0]]:\n\t\t\tdp[0] = i\n\t\telif sequence[i] >= sequence[dp[max_length]]:\n\t\t\tmax_length = max_length + 1\n\t\t\tdp.insert(max_length, i)\n\t\telse:\n\t\t\t#do a binary search to find index\n\t\t\tdp[binary_index(sequence, dp, max_length, sequence[i])] = i\n\t\ti = i + 1\n\treturn max_length+1\n\t\t\n\nrun()\n#adding doctest\nif __name__ == \"__main__\":\n\timport doctest\n\tdoctest.testmod()\n\n\t","sub_path":"longest_inc.py","file_name":"longest_inc.py","file_ext":"py","file_size_in_byte":2917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"304048999","text":"from Gamer import Gamer\n\n\nclass Dealer(Gamer):\n\n def __init__(self, name=\"\", A=None, display=False):\n super(Dealer, self).__init__(name, A, display)\n self.role = 'Dealer'\n self.policy = self.dealer_policy\n\n def first_card_value(self):\n if self.cards is None or len(self.cards) == 0:\n return 0\n return self._value_of(self.cards[0])\n\n def dealer_policy(self, Dealer=None):\n action = \"\"\n dealer_points, _ = self.get_points()\n if dealer_points >= 17:\n action = self.A[1]\n else:\n action = self.A[0]\n return action\n\n def cards_info(self):\n super().cards_info(self.role)\n","sub_path":"Model_free/MC_21/Dealer.py","file_name":"Dealer.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"39451135","text":"\"\"\"Measure ping latency to configured hosts.\"\"\"\nimport re\nimport subprocess\nfrom collections import defaultdict\n\nfrom schema import Optional\n\nfrom netrics import task\n\nfrom .common import default, require_lan\n\n\n#\n# ping exit codes\n#\n# if ping returns any code other than the below something is *very* wrong\n#\n# (the error code 2 is included -- unclear if ping *can* return anything higher than that.)\n#\nPING_CODES = {\n 0, # success\n 1, # no reply\n 2, # error (e.g. 
dns)\n}\n\n\n#\n# params schema\n#\n# input -- a (deserialized) mapping -- is entirely optional.\n#\n# a dict, of the optional param keys, their defaults, and validations of\n# their values, is given below, (extending the globally-supported input\n# parameter schema given by `task.schema`).\n#\nPARAMS = task.schema.extend('ping_latency', {\n # destinations: (ping): list of hosts\n # OR mapping of hosts to their labels (for results)\n Optional('destinations',\n default=default.PING_DESTINATIONS): task.schema.DestinationCollection(),\n\n # count: (ping): natural number\n Optional('count', default='10'): task.schema.NaturalStr('count'),\n\n # interval: (ping): int/decimal seconds no less than 2ms\n Optional('interval',\n default='0.25'): task.schema.BoundedRealStr('interval',\n 'seconds may be no less than 0.002 (2ms)',\n lambda interval: interval >= 0.002),\n\n # deadline: (ping): positive integer seconds\n Optional('deadline', default='5'): task.schema.PositiveIntStr('deadline', 'seconds'),\n})\n\n\n@task.param.require(PARAMS)\n@require_lan\ndef main(params):\n \"\"\"Measure ping latency to configured hosts.\n\n The local network is queried first to ensure operation.\n (See: `require_lan`.)\n\n Ping queries are then executed, in parallel, to each configured host\n (`destinations`) according to configured ping command arguments:\n `count`, `interval` and `deadline`.\n\n Ping outputs are parsed into structured results and written out\n according to configuration (`result`).\n\n \"\"\"\n # parallelize pings\n processes = {\n destination: subprocess.Popen(\n (\n 'ping',\n '-c', params.count,\n '-i', params.interval,\n '-w', params.deadline,\n destination,\n ),\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n text=True,\n ) for destination in params.destinations\n }\n\n # wait and collect outputs\n outputs = {destination: process.communicate() for (destination, process) in processes.items()}\n\n # check for exceptions\n failures = [\n (destination, process, outputs[destination])\n for (destination, process) in processes.items()\n if process.returncode not in PING_CODES\n ]\n\n if failures:\n total_failures = len(failures)\n\n # directly log first 3 failures\n for (fail_count, (destination, process, (stdout, stderr))) in enumerate(failures[:3], 1):\n task.log.critical(\n dest=destination,\n status=f'Error ({process.returncode})',\n failure=f\"({fail_count}/{total_failures})\",\n args=process.args[:-1],\n stdout=stdout,\n stderr=stderr,\n )\n\n if fail_count < total_failures:\n task.log.critical(\n dest='...',\n status='Error (...)',\n failure=f\"(.../{total_failures})\",\n args='...',\n stdout='...',\n stderr='...',\n )\n\n return task.status.software_error\n\n # log summary/general results\n statuses = defaultdict(int)\n for process in processes.values():\n statuses[process.returncode] += 1\n\n task.log.info({'dest-status': statuses})\n\n # parse detailed results\n results = {\n destination: parse_output(stdout)\n for (destination, (stdout, _stderr)) in outputs.items()\n }\n\n # label results\n if isinstance(params.destinations, dict):\n results = {\n params.destinations[destination]: result\n for (destination, result) in results.items()\n }\n\n # flatten results\n if params.result.flat:\n results = {f'{label}_{feature}': value\n for (label, data) in results.items()\n for (feature, value) in data.items()}\n\n # write results\n task.result.write(results,\n label=params.result.label,\n annotate=params.result.annotate)\n\n return task.status.success\n\n\ndef parse_output(output):\n \"\"\"Parse 
ping output and return dict of results.\"\"\"\n\n # Extract RTT stats\n rtt_match = re.search(\n r'rtt [a-z/]* = ([0-9.]*)/([0-9.]*)/([0-9.]*)/([0-9.]*) ms',\n output\n )\n\n rtt_values = [float(value) for value in rtt_match.groups()] if rtt_match else [-1.0] * 4\n\n rtt_keys = ('rtt_min_ms', 'rtt_avg_ms', 'rtt_max_ms', 'rtt_mdev_ms')\n\n rtt_stats = zip(rtt_keys, rtt_values)\n\n # Extract packet loss stats\n pkt_loss_match = re.search(r', ([0-9.]*)% packet loss', output, re.MULTILINE)\n\n pkt_loss = float(pkt_loss_match.group(1)) if pkt_loss_match else -1.0\n\n # Return combined dict\n return dict(rtt_stats, packet_loss_pct=pkt_loss)\n","sub_path":"src/netrics/measurement/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"255168171","text":"\"\"\" A page functions for Availability Zone\n\n\n:var list_page: A :py:class:`cfme.web_ui.Region` object describing elements on the list page.\n:var details_page: A :py:class:`cfme.web_ui.Region` object describing elements on the detail page.\n\"\"\"\n\nfrom cfme.web_ui import Region, SplitTable\n\n\n# Page specific locators\nlist_page = Region(\n locators={\n 'zone_table': SplitTable(header_data=('//div[@class=\"xhdr\"]/table/tbody', 1),\n body_data=('//div[@class=\"objbox\"]/table/tbody', 1))\n },\n title='CloudForms Management Engine: Cloud Providers')\n\n\ndetails_page = Region(infoblock_type='detail')\n","sub_path":"cfme/cloud/availability_zone.py","file_name":"availability_zone.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"311760248","text":"f = open(\"list_person_annotation_1.txt\",\"r\")\nimag= \"\"\nx=0\ny=0\nwidth_bbox =0\nheight_bbox =0\nheight_img = 608\nwidth_img = 608\nfor line in f:\n print(line)\n lnhalf1 = line.split('.')[0]\n print((lnhalf1[2:]))\n imag = (lnhalf1[2:])\n lnhalf2 = line.split('.')[1]\n x = int((lnhalf2.split(',')[1]).strip())\n y = int((lnhalf2.split(',')[2]).strip())\n width_bbox = int((lnhalf2.split(',')[3]).strip())\n height_bbox = int(((lnhalf2.split(',')[4]).strip())[:-1])\n #height_temp = lnhalf2.split(',')[5]\n #width_temp = lnhalf2.split(',')[6]\n #height_img = int(height_temp.split('=')[1])\n #width_img = int(width_temp.split('=')[1])\n # CALICULATIONS STARTS FROM HERE\n value_1 = x/width_img\n value_2 = y/height_img\n value_3 = width_bbox/width_img\n value_4 = height_bbox/height_img\n print(\"height =\",height_img)\n print(\"width =\",width_img)\n print(x)\n print(y)\n print(width_bbox)\n print(height_bbox)\n print(value_1)\n print(value_2)\n print(value_3)\n print(value_4)\n f1 = open(imag+\".txt\",\"a\")\n f1.write('1')\n f1.write(\" \")\n f1.write(str(value_1)[:8])\n f1.write(\" \")\n f1.write(str(value_2)[:8])\n f1.write(\" \")\n f1.write(str(value_3)[:8])\n f1.write(\" \")\n f1.write(str(value_4)[:8])\n f1.write(\"\\n\")\n \n \n \n \n","sub_path":"Extract_person.py","file_name":"Extract_person.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"288404516","text":"\"\"\"\nlibpad is used to sandbox code using codepad.org.\n\n\"\"\"\n\nfrom ehp import *\nimport requests\n\ndef post(data, lang, opt=False):\n \"\"\"\n Used to post code onto codepad.org.\n\n Example:\n url, data = libpad.post('print \"hi\"', 'python', opt=True)\n Would print the redirected url and the output \"hi\".\n 
\"\"\"\n\n URL = 'http://codepad.org'\n lang_map = {\n 'c':'C',\n 'cpp':'C++',\n 'd':'D',\n 'haskell':'Haskell',\n 'lua':'Lua',\n 'ocaml':'OCaml',\n 'php':'PHP',\n 'perl':'Perl',\n 'python':'Python',\n 'ruby':'Ruby',\n 'scheme':'Scheme',\n 'tcl':'Tcl'\n }\n\n head = {\n 'code':data,\n 'lang':lang_map.get(lang, 'Plain Text'),\n 'submit':'Submit'\n }\n\n head['run'] = opt\n req = requests.post(URL, data=head)\n return req.url, req.text\n\ndef sandbox(code, lang):\n \"\"\"\n Used to sandbox code.\n\n Example:\n\n url, output = libpad.sandbox('print \"hello world\"', lang='python')\n \"\"\"\n\n url, data = post(code, lang, opt=True)\n html = Html()\n dom = html.feed(data)\n\n code = dom.find('div', ('class', 'code'))\n code = list(list(code)[1].find('pre'))[1].text()\n \n return url, code\n\n\n\n","sub_path":"libpad.py","file_name":"libpad.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"122366071","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.http import Request, FormRequest\nfrom signal_project.items import SignalProjectItem\nimport time, datetime, csv, random, base64, re, json\nfrom time import sleep\nimport os\nimport os.path\nfrom scrapy.selector import Selector\nimport sys\nimport re\n\nclass SinalindunaSpider(scrapy.Spider):\n name = 'sinalInduna'\n allowed_domains = ['www.signal-iduna.de']\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n \n }\n\n start_urls = ['http://www.signal-iduna.de/']\n\n \n def start_requests(self):\n \n letters = 'abcdefghijklmnopqrstuvwxyz'\n url_list = ['{}{}%'.format(first_letter, second_letter) for first_letter in letters for second_letter in letters]\n for param in url_list:\n url = \"https://www.signal-iduna.de/adp-suche?singleSearch={}\".format(param)\n req = Request(url=url, callback=self.get_list, headers=self.headers, dont_filter=True)\n yield req\n \n def get_list(self, response):\n \n result_list = response.xpath('//h2[contains(@class, \"heading-primary\")]')\n if result_list != '':\n \n for result in result_list:\n \n url = response.urljoin(result.xpath('./a/@href').extract_first())\n req = Request(url=url, callback=self.parse_information, headers=self.headers, dont_filter=True)\n yield req \n\n next_link = response.xpath('//ul[@class=\"pagination\"]/li/a[@rel = \"next\"]/@href').extract_first('')\n if next_link != '':\n \n req = Request(url=response.urljoin(next_link), callback=self.get_list, headers=self.headers, dont_filter=True)\n yield req\n \n def parse_information(self, response):\n \n page_url = response.url\n item = SignalProjectItem()\n try:\n first_name = response.xpath('//h3[@class=\"text-big\"]/span[@class=\"hide767\"]/text()').extract_first().strip()\n last_name = response.xpath('//h3[@class=\"text-big\"]/span[@class=\"adp_name\"]/text()').extract_first().strip()\n street_number = response.xpath('//p[@class=\"adressinfo\"]/text()').extract()[0].split('\\n')\n street_number = ' '.join([s.strip() for s in street_number])\n post_code = response.xpath('//p[@class=\"adressinfo\"]/text()').extract()[1].split('\\n')\n post_code = ' '.join([s.strip() for s in post_code])\n \n except Exception as e:\n print(e)\n first_name = ''\n last_name = ''\n street_number = ''\n post_code = ''\n try:\n phone = ''\n 
Mobile = ''\n            Fax = ''\n            person_element = response.xpath('//p[@class=\"telefonnummern\"]/text()').extract()\n            \n            for person in person_element:\n                \n                if 'tel' in person.lower():\n                    phone = re.sub(r\"[^\\d]\", \"\", person)\n                    phone = phone[0:4] + ' ' + phone[4:]\n                \n                elif 'fax' in person.lower():\n                    Fax = re.sub(r\"[^\\d]\", \"\", person)\n                    Fax = Fax[0:4] + ' ' + Fax[4:]\n                \n                elif 'mobil' in person.lower():\n                    Mobile = re.sub(r\"[^\\d]\", \"\", person)\n                    Mobile = Mobile[0:4] + ' ' + Mobile[4:]\n                else:\n                    pass\n            \n        except Exception as e:\n            print(e)\n            phone = ''\n            Fax = ''\n            Mobile = ''\n        try:\n            register_name = response.xpath('//strong[contains(text(), \"Registerabruf:\")]/following-sibling::strong[1]/text()').extract_first().strip()\n        except Exception as e:\n            print(e)\n            register_name = ''\n        item['page_url'] = page_url\n        item[\"first_name\"] = first_name\n        item[\"last_name\"] = last_name\n        item[\"street_number\"] = street_number\n        item[\"post_code\"] = post_code\n        item[\"phone\"] = phone\n        item[\"Fax\"] = Fax\n        item[\"Mobile\"] = Mobile\n        item[\"register_name\"] = register_name\n\n        print('first_name=',first_name)\n        print('last_name=',last_name)\n        print('street_number=',street_number)\n        print('post_code=',post_code)\n        print('phone_number=',phone)\n        print('Fax=', Fax)\n        print('Mobile=', Mobile)\n        print('register_name=', register_name)\n\n        yield item\n","sub_path":"signal_project/spiders/sinalInduna.py","file_name":"sinalInduna.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"381797440","text":"from datetime import datetime\nfrom multiprocessing import Pool\nfrom nltk.classify.naivebayes import NaiveBayesClassifier\nfrom nltk.corpus import sentiwordnet\nfrom nltk.corpus import wordnet\nfrom nltk.tag import pos_tag\nfrom nltk.data import load\nfrom nltk.tokenize import TreebankWordTokenizer\nfrom pickle import HIGHEST_PROTOCOL\nfrom _pickle import dump\nfrom random import shuffle\nimport ujson\ntokenizerFrases = load('tokenizers/punkt/english.pickle')\ntokenizerPalavras = TreebankWordTokenizer()\narquivoPositivos = open('positivos.json')\npositivos = ujson.load(arquivoPositivos)\narquivoPositivos.close()\narquivoNeutros = open('neutros.json')\nneutros = ujson.load(arquivoNeutros)\narquivoNeutros.close()\narquivoNegativos = open('negativos.json')\nnegativos = ujson.load(arquivoNegativos)\narquivoNegativos.close()\ndef processoFeatures(resposta):\n\tfrases = tokenizerFrases.tokenize(resposta['corpo'])\n\tpalavras = []\n\tpalavrasTexto = {}\n\tfor frase in frases:\n\t\tpalavrasTemp = tokenizerPalavras.tokenize(frase)\n\t\tfor palavra in palavrasTemp:\n\t\t\tpalavras.append(palavra)\n\t\t\tpalavrasTexto[palavra] = True\n\tposTags = pos_tag(palavras)\n\tpositivo = 0\n\tnegativo = 0\n\tfor palavra, tag in posTags:\n\t\tsynsets = None\n\t\tif tag.startswith('J'):\n\t\t\tsynsets = sentiwordnet.senti_synsets(palavra, wordnet.ADJ)\n\t\telif tag.startswith('V'):\n\t\t\tsynsets = sentiwordnet.senti_synsets(palavra, wordnet.VERB)\n\t\telif tag.startswith('N'):\n\t\t\tsynsets = sentiwordnet.senti_synsets(palavra, wordnet.NOUN)\n\t\telif tag.startswith('R'):\n\t\t\tsynsets = sentiwordnet.senti_synsets(palavra, wordnet.ADV)\n\t\telse:\n\t\t\tsynsets = sentiwordnet.senti_synsets(palavra, '')\n\t\tif synsets is not None:\n\t\t\tsynsets = list(synsets)\n\t\t\tif len(synsets) > 0:\n\t\t\t\tsynset = synsets[0]\n\t\t\t\tpositivo = positivo + synset.pos_score()\n\t\t\t\tnegativo = negativo + synset.neg_score()\n\tif positivo > negativo:\n\t\treturn 
(palavrasTexto, 'positivo')\n\telif negativo > positivo:\n\t\treturn (palavrasTexto, 'negativo')\n\telse:\n\t\treturn (palavrasTexto, 'neutro')\npool = Pool()\nfeatures = pool.map(processoFeatures, positivos + negativos + neutros)\npool.terminate()\npool.close()\nshuffle(features)\ncomeco = datetime.now()\nclassificador = NaiveBayesClassifier.train(features)\ntempo = datetime.now() - comeco\narquivoMedicoes = open('medicoes_criar_classificador_sem_stopwords.txt', 'w')\narquivoMedicoes.write('Tempo de Execução = ' + str(tempo) + '\\n\\nFeatures Importantes:')\nfeaturesImportantes = classificador.most_informative_features(10)\nfor palavra, booleano in featuresImportantes:\n\tarquivoMedicoes.write('\\n' + palavra)\narquivoMedicoes.close()\narquivoClassificador = open('classificador.pickle', 'wb')\ndump(classificador, arquivoClassificador, protocol=HIGHEST_PROTOCOL)\narquivoClassificador.close()","sub_path":"Análise de Sentimento - SentiWordNet com Naive Bayes/criar_classificador_com_stopwords.py","file_name":"criar_classificador_com_stopwords.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"622199128","text":"from mod_python import apache\n\npinyin = apache.import_module('src/pinyin')\nconverter = pinyin.Converter()\n\ndef index(text, fmt = 'df', sc = 'true', pp = 'false', fuzzy = '0'):\n\n if sc == 'true':\n sc = True\n elif sc == 'false':\n sc = False\n else:\n sc = True\n\n if pp == 'true':\n pp = True\n elif pp == 'false':\n pp = False\n else:\n pp = False\n\n try:\n fuzzy = int(fuzzy)\n except ValueError:\n fuzzy = 0\n\n global converter\n return converter.convert(text, fmt, sc, pp, fuzzy)","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"430969540","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef fetch_rar_lineup():\n print(\"Fetching RaR bands...\")\n lineup_url = \"https://www.rock-am-ring.com/lineup\"\n r = requests.get(lineup_url)\n lineup_page = BeautifulSoup(r.text, features=\"html.parser\")\n band_tags = lineup_page.select(\"div.Appearance > a > h5 > span\")\n bands = [tag.string for tag in band_tags]\n print(\"Finished fetching of RaR bands.\")\n print(\"Total bands: {}\".format(len(bands)))\n return bands\n\n","sub_path":"src/rar.py","file_name":"rar.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"96878947","text":"# Sanket Mundra - IIT2018189\r\nfrom copy import deepcopy\r\nimport os\r\nimport pygame # pygame is used to make the gui\r\n\r\npygame.font.init()\r\n\r\n# constants\r\nINF = 1000000000\r\nwidth = height = 600\r\nROWS = COLS = 8\r\nedge_len = height // ROWS\r\nbrown = (180, 114, 30)\r\nwhite = (255, 255, 255)\r\nblack = (0, 0, 0)\r\ncyan = (0, 255, 255)\r\n# to display the king differently\r\nking = pygame.transform.scale(pygame.image.load('king.png'), (50, 33))\r\n\r\n'''\r\nCheckers class handles the complete game:\r\n * whose turn is it\r\n * what piece is selected\r\n * where all can we move the piece\r\n'''\r\nclass Checkers:\r\n def __init__(self):\r\n self.window = pygame.display.set_mode((width, height))\r\n pygame.display.set_caption('Checkers Game')\r\n self.selected = None\r\n self.board = Board()\r\n self.turn = white # first move of white piece\r\n self.all_moves = {} # stores the current valid moves 
for a player currently playing\r\n\r\n # updates the game state\r\n def update(self):\r\n self.board.draw(self.window)\r\n pygame.display.update()\r\n\r\n # declares winner of the game\r\n def winner(self):\r\n if (self.board.brown <= 0):\r\n self.window.fill(white)\r\n f = pygame.font.SysFont('Comic Sans MS', 32)\r\n self.window.blit(f.render(\"White Won!!\", False, brown), (width // 2, height // 2))\r\n pygame.display.update()\r\n pygame.time.delay(5000)\r\n return \"White won!!\"\r\n elif (self.board.white <= 0):\r\n self.window.fill(brown)\r\n f = pygame.font.SysFont('Comic Sans MS', 32)\r\n self.window.blit(f.render(\"Brown Won!!\", False, white), (width // 2, height // 2))\r\n pygame.display.update()\r\n pygame.time.delay(5000)\r\n return \"Brown won!!\"\r\n else:\r\n return None\r\n\r\n # on first call, it finds all the moves and then on second call moves to that position\r\n def select(self, row, col):\r\n piece = self.board.get_piece(row, col)\r\n if (self.selected):\r\n self.move(piece, row, col)\r\n\r\n if (piece != 0 and piece.color == self.turn):\r\n self.selected = piece\r\n self.all_moves = self.board.get_all_moves(piece)\r\n return True\r\n\r\n return False\r\n\r\n # moves the previously selected piece to the given row and col\r\n def move(self, piece, row, col):\r\n if (piece == 0 and (row, col) in self.all_moves):\r\n self.board.move(self.selected, row, col)\r\n self.board.remove(self.all_moves[(row, col)])\r\n self.all_moves = {}\r\n if (self.turn == brown):\r\n self.turn = white\r\n else:\r\n self.turn = brown\r\n\r\n def computer(self, board):\r\n self.board = board\r\n self.all_moves = {}\r\n if (self.turn == brown):\r\n self.turn = white\r\n else:\r\n self.turn = brown\r\n\r\n def get_board(self):\r\n return self.board\r\n\r\n\r\n'''\r\nPiece class represents a checkers piece on the board:\r\n * drawing a piece on the board\r\n * moving specific piece\r\n'''\r\nclass Piece:\r\n def __init__(self, row, col, color):\r\n self.row = row\r\n self.col = col\r\n self.x = edge_len * self.col + (edge_len // 2) # x-coordinate of center of the piece on the board\r\n self.y = edge_len * self.row + (edge_len // 2) # y-coordinate of center of the piece on the board\r\n self.color = color\r\n self.is_king = False\r\n\r\n # draws a piece on the screen on respective positions\r\n def draw(self, window):\r\n rad = edge_len // 2 - 10\r\n pygame.draw.circle(window, self.color, (self.x, self.y), rad)\r\n # placing the is_king.png at that location\r\n if (self.is_king):\r\n window.blit(king, (self.x - king.get_width() // 2, self.y - king.get_height() // 2))\r\n\r\n def move(self, row, col):\r\n self.row = row\r\n self.col = col\r\n self.x = edge_len * self.col + (edge_len // 2)\r\n self.y = edge_len * self.row + (edge_len // 2)\r\n\r\n\r\n'''\r\nBoard class represents a checkers board:\r\n * drawing the checkers board\r\n * drawing all the pieces on the board\r\n * hadles all the pieces moving\r\n * moving specific pieces\r\n * deleting (capturing) different pieces\r\n'''\r\nclass Board:\r\n def __init__(self):\r\n self.board = [] # 2D board of checker\r\n self.brown = self.white = 12 # #normal pieces of each type remaining on the bard\r\n self.brown_kings = self.white_kings = 0 # #kings of each type remaining on the board\r\n\r\n # initialise the self.board with correct pieces at correct positions\r\n for i in range(ROWS):\r\n temp = []\r\n for j in range(COLS):\r\n if (j % 2 == (i + 1) % 2):\r\n # top three rows for brown pieces\r\n if (i <= 2):\r\n temp.append(Piece(i, j, brown))\r\n # 
bottom three rows for white pieces\r\n elif (i >= 5):\r\n temp.append(Piece(i, j, white))\r\n # middle 2 rows initially empty\r\n else:\r\n temp.append(0)\r\n # rest all positions are empty\r\n else:\r\n temp.append(0)\r\n self.board.append(temp)\r\n\r\n # function to draw all the edge_lens on the window\r\n def draw_squares(self, window):\r\n window.fill(white)\r\n for i in range(ROWS):\r\n for j in range((i + 1) % 2, COLS, 2):\r\n pygame.draw.rect(window, black, (i * edge_len, j * edge_len, edge_len, edge_len))\r\n\r\n # to draw all the pieces on the board\r\n def draw(self, window):\r\n self.draw_squares(window)\r\n for i in range(ROWS):\r\n for j in range(COLS):\r\n if (self.board[i][j] != 0):\r\n self.board[i][j].draw(window)\r\n\r\n # function to calculate the utility value at cut-off\r\n # giving priority to killing rather than making king was performing better\r\n def get_score(self):\r\n return (self.brown - self.white) + ((self.brown_kings - self.white_kings) / 2)\r\n\r\n # return all the pieces of a given color\r\n def get_all_pieces(self, color):\r\n pieces = []\r\n for i in range(ROWS):\r\n for j in range(COLS):\r\n if(self.board[i][j] != 0 and self.board[i][j].color == color):\r\n pieces.append(self.board[i][j])\r\n return pieces\r\n\r\n # moves a piece on the new position on the board and updates board and piece accordingly\r\n def move(self, piece, new_row, new_col):\r\n # swapping the initial and final position values\r\n self.board[new_row][new_col] = self.board[piece.row][piece.col]\r\n self.board[piece.row][piece.col] = 0\r\n piece.move(new_row, new_col)\r\n\r\n if (new_row == 0 or new_row == ROWS - 1):\r\n piece.is_king = True\r\n if (piece.color == brown):\r\n self.brown_kings += 1\r\n else:\r\n self.white_kings += 1\r\n\r\n # removes list of pieces from the board\r\n def remove(self, pieces):\r\n for i in range(len(pieces)):\r\n self.board[pieces[i].row][pieces[i].col] = 0\r\n if (pieces[i] != 0):\r\n if (pieces[i].color == brown):\r\n self.brown -= 1\r\n else:\r\n self.white -= 1\r\n\r\n # returns all the moves possible as a dictionary\r\n # key = final position, values = list of pieces that can reach there\r\n def get_all_moves(self, piece):\r\n all_moves = {}\r\n\r\n # checking the feasible moves in downward direction\r\n if (piece.color == brown or piece.is_king):\r\n all_moves.update(self.move_left(piece.row + 1, min(piece.row + 3, ROWS), 1, piece.color, piece.col - 1))\r\n all_moves.update(self.move_right(piece.row + 1, min(piece.row + 3, ROWS), 1, piece.color, piece.col + 1))\r\n\r\n # checking the feasible moves in upward direction\r\n if (piece.color == white or piece.is_king):\r\n all_moves.update(self.move_left(piece.row - 1, max(piece.row - 3, -1), -1, piece.color, piece.col - 1))\r\n all_moves.update(self.move_right(piece.row - 1, max(piece.row - 3, -1), -1, piece.color, piece.col + 1))\r\n\r\n return all_moves\r\n\r\n # move in left diagonal\r\n def move_left(self, beg, end, inc, color, l, removed = []):\r\n moves = {}\r\n last_piece = []\r\n for i in range(beg, end, inc):\r\n if (l < 0):\r\n break\r\n\r\n if (self.board[i][l] == 0):\r\n if (len(removed) > 0 and len(last_piece) == 0):\r\n break\r\n else:\r\n moves[(i, l)] = last_piece + removed\r\n\r\n # next recursive call\r\n if (len(last_piece) > 0):\r\n if (inc == -1):\r\n row = max(i - 3, -1)\r\n else:\r\n row = min(i + 3, ROWS)\r\n moves.update(self.move_left(i + inc, row, inc, color, l - 1, last_piece))\r\n moves.update(self.move_right(i + inc, row, inc, color, l + 1, last_piece))\r\n break\r\n 
elif (self.board[i][l].color != color):\r\n last_piece.append(self.board[i][l])\r\n else:\r\n break\r\n l -= 1\r\n return moves\r\n\r\n # move in right diagonal\r\n def move_right(self, beg, end, inc, color, r, removed = []):\r\n moves = {}\r\n last_piece = []\r\n for i in range(beg, end, inc):\r\n if (r >= COLS):\r\n break\r\n\r\n if (self.board[i][r] == 0):\r\n if (len(removed) > 0 and len(last_piece) == 0):\r\n break\r\n else:\r\n moves[(i, r)] = last_piece + removed\r\n\r\n # next recursive call\r\n if (len(last_piece) > 0):\r\n if (inc == -1):\r\n row = max(i - 3, -1)\r\n else:\r\n row = min(i + 3, ROWS)\r\n moves.update(self.move_left(i + inc, row, inc, color, r - 1, last_piece))\r\n moves.update(self.move_right(i + inc, row, inc, color, r + 1, last_piece))\r\n break\r\n elif (self.board[i][r].color != color):\r\n last_piece.append(self.board[i][r])\r\n else:\r\n break\r\n r += 1\r\n return moves\r\n\r\n def get_piece(self, row, col):\r\n return self.board[row][col]\r\n\r\n def print_board(self):\r\n for i in range(ROWS):\r\n for j in range(COLS):\r\n if (self.board[i][j] != 0):\r\n if(self.board[i][j].color == white):\r\n print(\"W \", end = \"\")\r\n else:\r\n print(\"B \", end = \"\")\r\n else:\r\n print(\"0 \", end = \"\")\r\n print(\"\")\r\n\r\n# mini-max function\r\ndef mini_max(current_board, depth, turn,alpha,beta, checkers):\r\n # 3 = cut-off limit for the mini-max\r\n print(\"HELLO\")\r\n if (depth == 3 or checkers.winner() != None):\r\n print(\"HELLO1\")\r\n return current_board.get_score(), current_board\r\n\r\n # MAX player turn\r\n if (turn == 1):\r\n print(\"HELLO2\")\r\n moved_boards = get_moved_boards(current_board, brown, checkers)\r\n bestVal = -1000\r\n best_moved_board = None\r\n for i in range(len(moved_boards)):\r\n val, temp = mini_max(moved_boards[i], depth + 1, 0, alpha, beta,checkers)\r\n bestVal = max( bestVal, val) \r\n alpha = max( alpha, bestVal)\r\n if (beta <= alpha):\r\n print(\"HELLO4\")\r\n best_moved_board = moved_boards[i]\r\n break\r\n return bestVal, best_moved_board\r\n\r\n # MIN player turn\r\n else:\r\n print(\"HELLO3\")\r\n moved_boards = get_moved_boards(current_board, white, checkers)\r\n bestVal = 1000\r\n best_moved_board = None\r\n for i in range(len(moved_boards)):\r\n val, temp = mini_max(moved_boards[i], depth + 1, 1,alpha,beta, checkers)\r\n bestVal = min( bestVal, val) \r\n beta = min( beta, bestVal)\r\n if (beta <= alpha):\r\n print(\"HELLO5\")\r\n best_moved_board = moved_boards[i]\r\n break\r\n return bestVal, best_moved_board\r\n\r\n# function to find all the next state boards\r\ndef get_moved_boards(board, color, checkers):\r\n new_boards = []\r\n all_pieces = board.get_all_pieces(color)\r\n for i in range(len(all_pieces)):\r\n all_moves = board.get_all_moves(all_pieces[i])\r\n '''\r\n print(all_moves)\r\n for i in all_moves:\r\n print(i, end = \" \")\r\n if len(all_moves[i]) > 0:\r\n print(all_moves[i][0].row, all_moves[i][0].col)\r\n '''\r\n for move, remove in all_moves.items():\r\n temp_board = deepcopy(board)\r\n temp_piece = temp_board.get_piece(all_pieces[i].row, all_pieces[i].col)\r\n\r\n # applying changes\r\n temp_board.move(temp_piece, move[0], move[1])\r\n if (len(remove) > 0):\r\n temp_board.remove(remove)\r\n new_boards.append(temp_board)\r\n\r\n return new_boards\r\n\r\ndef main():\r\n run = True\r\n # setting up pygame display\r\n checkers = Checkers()\r\n\r\n while run:\r\n\r\n '''\r\n comment this if condition: if (checkers.turn == brown):\r\n as well as the below one: if (checkers.turn == white):\r\n to 
play human vs human\r\n '''\r\n if (checkers.turn == brown):\r\n print(\"HELLP\")\r\n value, new_board = mini_max(checkers.get_board(), 0, 1,1000,-1000, checkers)\r\n print(value)\r\n checkers.computer(new_board)\r\n\r\n # uncomment this part for computer vs computer\r\n '''\r\n if (checkers.turn == white):\r\n value, new_board = mini_max(checkers.get_board(), 0, 0, checkers)\r\n checkers.computer(new_board)\r\n '''\r\n\r\n # destination point or goal reached\r\n if (checkers.winner() != None):\r\n run = False\r\n\r\n # run through all the events happening at the moment\r\n for i in pygame.event.get():\r\n if (i.type == pygame.QUIT):\r\n print(\"Terminated!!\")\r\n run = False\r\n\r\n if (i.type == pygame.MOUSEBUTTONDOWN):\r\n pos_xy = pygame.mouse.get_pos()\r\n row = pos_xy[1] // edge_len\r\n col = pos_xy[0] // edge_len\r\n checkers.select(row, col)\r\n checkers.update()\r\n pygame.quit()\r\n\r\nmain()\r\n","sub_path":"AlphaBetaPruning/checkers.py","file_name":"checkers.py","file_ext":"py","file_size_in_byte":14840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"135633991","text":"# -*- encoding: utf-8 -*-\nfrom __future__ import (unicode_literals, absolute_import,\n division, print_function)\nimport unittest\nimport json\nimport re\nimport glob\nfrom os.path import join, dirname\nfrom jsonschema.validators import Draft4Validator\nimport jsonschema.exceptions\n\nfrom ..validator import OParl\nfrom ..schema import SCHEMA_DIR\n\nDATA_DIR = join(dirname(__file__), 'testdata')\n\n\nclass TestSchema(unittest.TestCase):\n # pylint: disable=protected-access\n\n #\n # Helper functions\n #\n\n def _safe_load_json_file(self, filename):\n try:\n with open(filename) as json_file:\n return json.load(json_file)\n except (IOError, ValueError) as expr:\n print(expr)\n return None\n\n def _load_test_file(self, filename):\n return self._safe_load_json_file(join(DATA_DIR, filename))\n\n def _do_test_json_validation(self, obj_type, testfile):\n data = self._load_test_file(testfile)\n self.assertIsNotNone(data,\n \"test data '%s' not found or invalid\" % testfile)\n return OParl._validate_schema(obj_type, data)\n\n def _test_validation(self, obj_type, testfile, expected_errors=None):\n \"\"\"\n Check validation results. \"expected_errors\" contains patterns\n for matching the error messages retuned by the validation. All\n returned messages should match on supplied item and all\n supplied items should match one returned error message. So if\n no \"expected_errors\" are supplied, the validation should\n pass.\n \"\"\"\n\n if expected_errors is None:\n expected_errors = list()\n\n for error in self._do_test_json_validation(obj_type, testfile):\n for expected_error in expected_errors:\n if re.search(expected_error, error.message):\n expected_errors.remove(expected_error)\n break\n else:\n self.assertTrue(False, '%s: Unexpected error message: \"%s\"' %\n (testfile, error.message))\n if len(expected_errors) > 0:\n self.assertTrue(False,\n '%s: Missing some expected error messages: %s' %\n (testfile, expected_errors))\n\n def _test_missing_item(self, obj_type, filename, missing_items):\n \"\"\"\n This is a shortcut for testing for missing properties in the\n testdata. 
The verification is expected to fail and all\n supplied \"missing_items\" are expected as error messages.\n \"\"\"\n expected_errors = ['%s.*is a required property' %\n missing for missing in missing_items]\n self._test_validation(obj_type, filename, expected_errors)\n\n #\n # Real tests\n #\n\n def test_validate_schema(self):\n def _validate(filename):\n try:\n data = self._safe_load_json_file(filename)\n self.assertIsNotNone(data, '%s: invalid json syntax' %\n filename)\n Draft4Validator(Draft4Validator.META_SCHEMA).validate(data)\n except (jsonschema.exceptions.ValidationError,\n jsonschema.exceptions.SchemaError) as excp:\n self.assertTrue(False, '%s: oparl schema invalid: %s' %\n (filename, excp))\n\n for schema_file in glob.glob(join(SCHEMA_DIR, '*.json')):\n _validate(schema_file)\n\n def test_build_object_type(self):\n self.assertEquals(OParl._build_object_type('oparl:Document'),\n 'oparl:Document')\n self.assertEquals(OParl._build_object_type('document'),\n 'oparl:Document')\n self.assertEquals(OParl._build_object_type('Document'),\n 'oparl:Document')\n self.assertEquals(OParl._build_object_type('DOCUMENT'),\n 'oparl:Document')\n self.assertEquals(OParl._build_object_type('oparl:document'),\n 'oparl:document')\n self.assertEquals(OParl._build_object_type('agenda_item'),\n 'oparl:AgendaItem')\n\n def test_valid_agenda(self):\n self._test_validation('oparl:AgendaItem', 'agenda_item.valid.json')\n\n def test_valid_body(self):\n self._test_validation('oparl:Body', 'body.valid.json')\n\n def test_valid_consultation(self):\n self._test_validation('oparl:Consultation', 'consultation.valid.json')\n\n def test_valid_document(self):\n self._test_validation('oparl:Document', 'document.valid.json')\n\n def test_valid_meeting(self):\n self._test_validation('oparl:Meeting', 'meeting.valid.json')\n\n def test_valid_organization(self):\n self._test_validation('oparl:Organization', 'organization.valid.json')\n\n def test_valid_paper(self):\n self._test_validation('oparl:Paper', 'paper.valid.json')\n\n def test_valid_person(self):\n self._test_validation('oparl:Person', 'person.valid.json')\n\n def test_valid_system(self):\n self._test_validation('oparl:System', 'system.valid.json')\n\n def test_invalid_agenda(self):\n self._test_missing_item(\n 'oparl:AgendaItem',\n 'agenda_item.missing.type.json',\n ['type'])\n\n def test_invalid_body(self):\n self._test_missing_item(\n 'oparl:Body',\n 'body.missing.system_paper_member_meeting_organization.json',\n ['system', 'paper', 'member', 'meeting', 'organization'])\n\n def test_invalid_consultation(self):\n self._test_missing_item(\n 'oparl:Consultation',\n 'consultation.missing.committee_agendaItem_paper.json',\n ['committee', 'agendaItem', 'paper'])\n\n def test_invalid_document(self):\n self._test_missing_item(\n 'oparl:Document',\n 'document.missing.fileName_mimeType_date_'\n 'modified_size_accessUrl.json',\n ['fileName', 'mimeType', 'data', 'modified',\n 'size', 'accessUrl'])\n\n def test_invalid_meeting(self):\n self._test_missing_item(\n 'oparl:Meeting',\n 'meeting.missing.start_organization.json',\n ['start', 'organization'])\n\n def test_invalid_membership(self):\n self._test_missing_item(\n 'oparl:Membership',\n 'membership.missing.person_organization.json',\n ['person', 'organization'])\n\n def test_invalid_organization(self):\n self._test_missing_item(\n 'oparl:Organization',\n 'organization.missing.body_nameLong_member.json',\n ['body', 'nameLong', 'member'])\n\n def test_invalid_paper(self):\n self._test_missing_item(\n 'oparl:Paper',\n 
'paper.missing.name_body.json',\n ['name', 'body'])\n\n def test_invalid_person(self):\n self._test_missing_item(\n 'oparl:Person',\n 'person.missing.name.json',\n ['name'])\n\n def test_invalid_system(self):\n self._test_missing_item(\n 'oparl:System',\n 'system.missing.oparlVersion_bodies.json',\n ['oparlVersion', 'bodies'])\n","sub_path":"oparlvalidator/tests/test_schema.py","file_name":"test_schema.py","file_ext":"py","file_size_in_byte":7221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"458014959","text":"############################################################################\n# Project: Electrical Pre-Conditioning of Convective Clouds,\n# Title: PG Quickplots from RUAO or Chilbolton\n# Author: James Gilmore,\n# Email: james.gilmore@pgr.reading.ac.uk.\n# Version: 1.4\n# Date: 03/08/2018\n# Status: Stable\n# Change: Added in support for Matplotlib backends. You can specify the backend from the command line and from check which backends are available.\n############################################################################\n\"\"\"Import Python Libraries\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom io import StringIO\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime, timedelta, time\nimport csv, os, glob, sys, argparse, fnmatch, warnings, pyaudio, wave\nfrom netCDF4 import Dataset as NetCDFFile\nimport time as systime\nfrom scipy import stats\n\nwith warnings.catch_warnings():\n\twarnings.simplefilter(\"ignore\")\n\t\n\timport matplotlib.pyplot as plt\n\timport matplotlib.backends\n\tfrom matplotlib.ticker import MultipleLocator, FormatStrFormatter, MaxNLocator\n\tfrom matplotlib.dates import DateFormatter, MinuteLocator, HourLocator, DayLocator\n\nwarnings.simplefilter('always')\t\n\n############################################################################\n\"\"\"Global Variables\"\"\"\n\n#File location RUAO and Chilbolton data. The 'Low' and 'High' specify the quality of the PG data.\n#'Low' is the location of the latest data and has potentially not been processed yet for any errors (e.g. METFiDAS-Incoming). \n#'High' is the processed data with some quality controls. This might not always be available for all dates as 'Low'\nDirectories = {'High' : {'RUAO' : \"/glusterfs/phd/users/th863480/PhD_Data/Field_Mill/Level1/RUAO/Daily/\",\n\t\t\t\t\t\t'Chilbolton' : \"/glusterfs/phd/users/th863480/PhD_Data/Field_Mill/Level1/Chilbolton/1sec/\"},\n\t\t\t\t'Low' : {'RUAO' : \"/export/its/labs/METFiDAS-Incoming/Level1/\",\n\t\t\t\t\t\t\t'Chilbolton' : \"/export/its/labs/ChilboltonPG/Archive/\"}}\n\n############################################################################\n\"\"\"Classes\"\"\"\n\t\nclass PG_Plotter(object):\n\t\"\"\"Plots the PG at the specified location between the specified dates.\n\t\n\tN.B. As this is a quick plotter only basic quality control checks have been performed\n\ton the PG data. This is to ensure that you can plot the latest PG as possible as quick\n\tas possible. The higher grade quality control data found in the gluster\"\"\"\n\t\n\tdef __init__(self, Location, Date_Start, Date_End, Save_Plot=True, Fixed_Axis=False, Save_Dir=\"\", File_Name=None, High_Grade=False, Print_Progress=True, Return_Data=False, Force_Plotting=False):\n\t\t\"\"\"Setup the PG data procedures\n\t\t\n\t\tParameters\n\t\t----------\n\t\tLocation : str\n\t\t\tThe physical location name which you want to plot the PG data. 
Current the \n\t\t\tavailable options are\n\t\t\t\n\t\t\t1) RUAO\n\t\t\t2) Chilbolton\n\t\t\t3) All\n\t\t\t\n\t\t\tAll will plot all PG data from each location together on one plot\n\t\tDate_Start : python datetime\n\t\t\tThe start datetime you want to plot the PG data from. You can specify to an\n\t\t\taccuracy of 1 second.\n\t\tDate_End : python datetime\n\t\t\tThe end datetime you want to plot the PG data from. You can specify to an\n\t\t\taccuracy of 1 second.\n\t\tSave_Plot : boolean, optional, default is True\n\t\t\tSpecify whether to save the plot to file (True) or to output the plot to the\n\t\t\tconsole (False).\n\t\tFixed_Axis : boolean, optional, default is False\n\t\t\tOnly used when Location is set to 'All'. It specifies if you should force the\n\t\t\ty-axis (PG) to have the same upper and lower limits for each location.\n\t\tSave_Dir : str, optional, default == \"\"\n\t\t\tSpecify the file location you want to save the plot. Only used if Save_Plot is\n\t\t\tTrue.\n\t\tFile_Name : str, optional, default is None\n\t\t\tSpecify the file name of the plot you want to save. Only used if Save_Plot is\n\t\t\tTrue.\n\t\tHigh_Grade : boolean, optional, default is False\n\t\t\tSpecify if you want to use the fully processed PG data or the minor processed\n\t\t\tPG data. Using the fully processed PG data comes at a cost that it only updates\n\t\t\tonce per day while the minor processed PG data is updated every 10 minutes for\n\t\t\tRUAO data and every hour for Chilbolton data.\n\t\tPrint_Progress : boolean, optional, default is True\n\t\t\tSpecify if you want to print the progress of processing the PG data to the\n\t\t\tconsole.\n\t\tReturn_Data : boolean, optional, default is False\n\t\t\tSpecify if you want to return the data to an array for further processing rather\n\t\t\tthan for plotting.\n\t\t\t\n\t\t\"\"\"\n\t\t\n\t\tself.High_Grade = High_Grade\n\n\t\t#Specify all available data locations. If High_Grade is False then use the RUAO stored Chilbolton Data rather than the glusterfs which is only updated once per day\n\t\tif High_Grade is True:\n\t\t\tself.Directories = Directories['High']\n\t\telif High_Grade is False:\n\t\t\tself.Directories = Directories['Low']\n\t\telse:\n\t\t\tsys.exit(\"[Error] High_Grade must be set to either True or False. High_Grade was set to %s\" % High_Grade)\n\t\t\n\t\t#Datetime_Importer\n\t\tself.str2date = lambda x: np.datetime64(datetime.strptime(x.decode(\"utf-8\"), '%d/%m/%Y'))\n\t\t\n\t\t#Set-up data importer\n\t\tself.EPCC_Data = _EPCC_Importer()\t\n\t\t\n\t\t#Determine if you want to plot even if no PG data is available\n\t\tself.Force_Plotting = Force_Plotting\n\t\t\n\t\t#Plotting requirements\n\t\tplt.style.use('classic') #necessary if Matplotlib version is >= 2.0.0\n\t\t\n\t\t#Set-up conditionals\n\t\tself.Save_Location = Save_Dir + 'PG_Quickplot_' + Location + '_' + Date_Start.strftime('%Y%m%d_%H%M%S') + '_to_' + Date_End.strftime('%Y%m%d_%H%M%S') + '.png' if File_Name is None else Save_Dir + File_Name\n\t\tself.Date_Start = Date_Start\n\t\tself.Date_End = Date_End\n\t\tself.Save_Plot = Save_Plot\n\t\tself.Print_Progress = Print_Progress\n\t\tself.Instrument_Calibration_File = StringIO(u\"Date,METFiDAS_Version,Notes\\n07/05/2006,0,METFiDAS 1 is operational\\n17/05/2006,1,Field Mill installed and calibrated by Alec Bennett. 
For the PGRR analysis we use this data as the starting point for analysis\\n08/06/2014,0,METFiDAS 1 data is unreliable initial signs of logger failure\\n11/06/2014,0,METFiDAS 1 is not operational as the logger has failed\\n28/08/2014,0,METFiDAS 3 is operational again (NO FIELD MILL :()\\n14/04/2015,3,METFiDAS 3a overtakes data logging\")\n\t\t\n\t\tif Return_Data is True: return\n\t\t\n\t\tif Location == 'All':\n\t\t\t\n\t\t\tRUAO_Data = self._RUAO(Return_Data = True)\n\t\t\tChilbolton_Data = self._Chilbolton(Return_Data = True)\n\n\t\t\t#Plot Data\n\t\t\tplot_title = 'RUAO and Chilbolton PG Timeseries' if Fixed_Axis is False else 'RUAO and Chilbolton PG Timeseries (Fixed Y Axis)'\n\t\t\tAll_Plot = _SPMods(2, plot_title, np.nan, DateRange=[self.Date_Start, self.Date_End])\n\t\t\tif Fixed_Axis is True:\n\t\t\t\tif (RUAO_Data is not False) & (Chilbolton_Data is not False):\n\t\t\t\t\tAll_Plot.Field_Mill(RUAO_Data[:,0], RUAO_Data[:,1], Field_Mill_Type='RUAO', Time_Range=[self.Date_Start, self.Date_End], Fixed_Range=[np.nanmin(np.append(RUAO_Data[:,1], Chilbolton_Data[:,1]).astype(float)), np.nanmax(np.append(RUAO_Data[:,1], Chilbolton_Data[:,1]).astype(float))], Overlap_Ticks=True) \n\t\t\t\t\tAll_Plot.Field_Mill(Chilbolton_Data[:,0], Chilbolton_Data[:,1], Field_Mill_Type='Chilbolton', Time_Range=[self.Date_Start, self.Date_End], Fixed_Range=[np.nanmin(np.append(RUAO_Data[:,1], Chilbolton_Data[:,1]).astype(float)), np.nanmax(np.append(RUAO_Data[:,1], Chilbolton_Data[:,1]).astype(float))], Overlap_Ticks=True)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"[WARNING] RUAO or Chilbolton data is unavailable so can't fix the axes. Plotting exclusively...\")\n\t\t\t\t\tAll_Plot.Field_Mill(RUAO_Data[:,0], RUAO_Data[:,1], Time_Range=[self.Date_Start, self.Date_End], Field_Mill_Type='RUAO') if RUAO_Data is not False else All_Plot._PlotError(\"RUAO Data Unavailable!\")\n\t\t\t\t\tAll_Plot.Field_Mill(Chilbolton_Data[:,0], Chilbolton_Data[:,1], Time_Range=[self.Date_Start, self.Date_End], Field_Mill_Type='Chilbolton') if Chilbolton_Data is not False else All_Plot._PlotError(\"Chilbolton Data Unavailable!\")\n\t\t\telse:\n\t\t\t\tAll_Plot.Field_Mill(RUAO_Data[:,0], RUAO_Data[:,1], Time_Range=[self.Date_Start, self.Date_End], Field_Mill_Type='RUAO') if RUAO_Data is not False else All_Plot._PlotError(\"RUAO Data Unavailable!\")\n\t\t\t\tAll_Plot.Field_Mill(Chilbolton_Data[:,0], Chilbolton_Data[:,1], Time_Range=[self.Date_Start, self.Date_End], Field_Mill_Type='Chilbolton') if Chilbolton_Data is not False else All_Plot._PlotError(\"Chilbolton Data Unavailable!\")\n\t\t\tAll_Plot._PlotSave(self.Save_Location) if self.Save_Plot is True else All_Plot.Show()\n\t\t\n\t\telif Location == 'RUAO': \n\t\t\t\n\t\t\tself._RUAO()\n\t\t\t\n\t\telif Location == 'Chilbolton': \n\t\t\t\n\t\t\tself._Chilbolton()\n\t\n\t\treturn\n\t\t\n\tdef _RUAO(self, Return_Data=False, Expanded_Data=False):\n\t\t\"\"\"Import, process and plot the PG data between the dates specified from the \n\t\tcommand line.\n\t\t\n\t\tParameters\n\t\t----------\n\t\tReturn_Data : bool, optional, default is False\n\t\t\tSpecify whether you want to return the data when this sub function is called\n\t\t\trather than plot the output data.\n\t\tExpanded_Data : bool, optional, default if False\n\t\t\tSpecify whether you want to extract an expanded dataset beyond just the PG\n\t\t\tmeasurements. 
The extra data will include the rain, sg, sd and cloud data.\n\t\t\n\t\t\"\"\"\n\t\t\n\t\t#Get Calibrations for Instruments\n\t\tcalibrations = np.genfromtxt(self.Instrument_Calibration_File, delimiter=',', names=True, dtype=None, converters = {0: self.str2date})\n\t\t\t\t\t\t\n\t\t#Import file list\n\t\tif self.High_Grade is True:\n\t\t\tfilelist = sorted(glob.glob(self.Directories['RUAO'] + '*.nc'))\n\t\telif self.High_Grade is False:\n\t\t\tfilelist = []\n\t\t\tfor root, dirnames, filenames in os.walk(self.Directories['RUAO']):\n\t\t\t\tfor filename in fnmatch.filter(filenames, '*SMP1*.csv'):\n\t\t\t\t\tfilelist.append(os.path.join(root, filename))\n\t\t\t\t\t\n\t\t\t\t#for filename in fnmatch.filter(filenames, '*Smp1Sec.csv'):\n\t\t\t\t#\tfilelist.append(os.path.join(root, filename))\n\n\t\t#Determine dates for each file\n\t\tdatelist = np.zeros(len(filelist), dtype=object)\n\t\tif self.High_Grade is True:\n\t\t\tfor i in xrange(len(filelist)): \n\t\t\t\tdatelist[i] = datetime.strptime(os.path.basename(filelist[i]), 'RUAO_PGRR_Data_1Hz_%Y%m%d.nc')\n\t\telif self.High_Grade is False:\n\t\t\tfor i in xrange(len(filelist)): \n\t\t\t\ttry:\n\t\t\t\t\t#datelist[i] = datetime.strptime(os.path.basename(filelist[i])[:10], '%Y-%m-%d')\n\t\t\t\t\tdatelist[i] = datetime.strptime(os.path.basename(filelist[i]), '%Y-SMP1-%j.csv')\n\t\t\t\texcept:\n\t\t\t\t\tdatelist[i] = datetime(1900,1,1)\n\t\t\n\t\t#Determine the files to import\n\t\tfilelist = np.array(filelist, dtype=str)\n\t\tif self.Print_Progress is True: print(\"Finding RUAO files between %s and %s\" % (self.Date_Start, self.Date_End))\n\t\t\n\t\t\n\t\tWanted_File_List = filelist[(datelist >= datetime.combine(self.Date_Start, time(0,0,0))) & (datelist <= datetime.combine(self.Date_End, time(0,0,0)))]\n\t\tWanted_Date_List = datelist[(datelist >= datetime.combine(self.Date_Start, time(0,0,0))) & (datelist <= datetime.combine(self.Date_End, time(0,0,0)))]\n\t\t\n\t\tRUAO_Data_All = _totalarray()\n\t\tfor File_Loc, Date_Loc in zip(Wanted_File_List, Wanted_Date_List):\n\t\t\t\n\t\t\t#Import datafile\n\t\t\tif self.High_Grade is True:\n\t\t\t\tRUAO_Data = self.EPCC_Data.RUAO_Calibrate(File_Loc, NC_File=True, unpack=False)\n\t\t\t\t\n\t\t\t\tif RUAO_Data is None: continue\n\t\t\t\t\n\t\t\telif self.High_Grade is False:\n\t\t\t\t#Get Calibration Values\n\t\t\t\tCalibrations_Selected = calibrations[calibrations['Date'] < Date_Loc][-1]\n\t\t\t\t\n\t\t\t\tif Expanded_Data is True:\n\t\t\t\t\n\t\t\t\t\tRUAO_Data = self.EPCC_Data.RUAO_Calibrate(File_Loc, Col=(0,45,5,6), unpack=False)\n\t\t\t\t\t\n\t\t\t\t\tif RUAO_Data is None: continue\n\t\t\t\t\t\n\t\t\t\t\t#Calibrate PG, Sg, Sd data\n\t\t\t\t\t#RUAO_Data[:,1] = (RUAO_Data[:,1].astype(float).copy() - Calibrations_Selected[\"PG_Offset\"])/Calibrations_Selected[\"PG_Multipler\"]\n\t\t\t\t\t#RUAO_Data[:,2] = (RUAO_Data[:,2].astype(float).copy() - Calibrations_Selected[\"Sg_Offset\"])/Calibrations_Selected[\"Sg_Multipler\"]\n\t\t\t\t\t#RUAO_Data[:,3] = (RUAO_Data[:,3].astype(float).copy() - Calibrations_Selected[\"Sd_Offset\"])/Calibrations_Selected[\"Sd_Multipler\"]\n\t\t\t\telse:\n\t\t\t\t\tRUAO_Data = self.EPCC_Data.RUAO_Calibrate(File_Loc, Col=(0,45), unpack=False)\n\t\t\t\t\t\n\t\t\t\t\tif RUAO_Data is None: continue\n\t\t\t\t\t\n\t\t\t\t\t#Calibrate PG data\n\t\t\t\t\t#RUAO_Data[:,1] = (RUAO_Data[:,1].astype(float).copy() - Calibrations_Selected[\"PG_Offset\"])/Calibrations_Selected[\"PG_Multipler\"]\n\t\t\t\t\t\n\t\t\t\t#Remove out of bounds data\t\n\t\t\t\tRUAO_Data = RUAO_Data[(RUAO_Data[:,0] >= 
self.Date_Start) & (RUAO_Data[:,0] < self.Date_End)]\n\t\t\t\n\t\t\tRUAO_Data_All.update([RUAO_Data])\n\t\t\t\n\t\tRUAO_Data_All = (RUAO_Data_All.finalize(object))\n\t\t\n\t\tif RUAO_Data_All.size == 0:\n\t\t\tif self.Force_Plotting is False:\n\t\t\t\tsys.exit(\"[WARNING] No RUAO data could be found for the specified dates! Please check date availability using --ping argument!\")\n\t\t\telse:\n\t\t\t\twarnings.warn(\"[WARNING] No RUAO data could be found for the specified dates! Please check date availability using --ping argument!\", ImportWarning, stacklevel=2)\n\t\t\t\tif Return_Data is True: return False\n\t\t\t\tRUAO_Data_All = False\n\t\t\n\t\tif RUAO_Data_All is not False:\n\t\t\tRUAO_Data_All = RUAO_Data_All[RUAO_Data_All[:,0].argsort()]\n\t\t\tRUAO_Data_All = np.concatenate((RUAO_Data_All,[[self.Date_End, np.nan, np.nan, np.nan]])) if Expanded_Data is True else np.concatenate((RUAO_Data_All,[[self.Date_End, np.nan]]))\n\t\t\n\t\tif Return_Data is True: return RUAO_Data_All\n\n\t\t#Plot Data\n\t\tplot_title = 'RUAO PG Timeseries between ' + self.Date_Start.strftime('%d/%m/%Y %H:%M:%S') + \" and \" + self.Date_End.strftime('%d/%m/%Y %H:%M:%S')\n\t\tRUAO_Plot = _SPMods(1, plot_title, np.nan, DateRange=[self.Date_Start, self.Date_End])\n\t\tRUAO_Plot.Field_Mill(RUAO_Data_All[:,0], RUAO_Data_All[:,1], Time_Range=[self.Date_Start, self.Date_End], Field_Mill_Type='RUAO') if RUAO_Data_All is not False else RUAO_Plot._PlotError(\"RUAO Data Unavailable!\")\n\t\tif self.Save_Plot is True: RUAO_Plot._PlotSave(self.Save_Location)\n\t\tif self.Save_Plot is False: RUAO_Plot.Show()\n\t\n\tdef _Chilbolton(self, Return_Data=False):\n\n\t\t#Plotting requirements\n\t\tplt.style.use('classic') #necessary if Matplotlib version is >= 2.0.0\n\t\t\n\t\t#Import file list\n\t\tfilelist = sorted(glob.glob(self.Directories['Chilbolton'] + '*.nc')) if self.High_Grade is True else sorted(glob.glob(self.Directories['Chilbolton'] + '*1Sec.csv'))\n\t\t\t\n\t\t#Determine dates for each file\n\t\tdatelist = np.zeros(len(filelist), dtype=object)\n\t\tif self.High_Grade is True:\n\t\t\tfor i in xrange(len(filelist)): \n\t\t\t\ttry:\n\t\t\t\t\tdatelist[i] = datetime.strptime(os.path.basename(filelist[i]), \"Field_Mill_PG_Chilbolton_1sec_%Y%m%d.nc\")\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\telif self.High_Grade is False:\n\t\t\tfor i in xrange(len(filelist)): \n\t\t\t\ttry:\n\t\t\t\t\tdatelist[i] = datetime.strptime(os.path.basename(filelist[i]), \"%Y-%m-%d-PG1Sec.csv\")\n\t\t\t\texcept:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdatelist[i] = datetime.strptime(os.path.basename(filelist[i]), \"%Y-%m-%d_PG1Sec.csv\")\n\t\t\t\t\texcept:\n\t\t\t\t\t\tprint(\"ERROR\", os.path.basename(filelist[i]))\n\t\t\t\t\t\tcontinue\n\t\t\n\t\t#Determine the files to import\n\t\tfilelist = np.array(filelist, dtype=str)\n\t\tif self.Print_Progress is True: print(\"Finding Chilbolton files between %s and %s\" % (self.Date_Start, self.Date_End))\n\t\t\n\t\tWanted_File_List = filelist[(datelist >= datetime.combine(self.Date_Start, time(0,0,0))) & (datelist <= datetime.combine(self.Date_End, time(0,0,0)))]\n\t\tWanted_Date_List = datelist[(datelist >= datetime.combine(self.Date_Start, time(0,0,0))) & (datelist <= datetime.combine(self.Date_End, time(0,0,0)))]\n\t\t\n\t\tChilbolton_Data_All = _totalarray()\n\t\tfor File_Loc, Date_Loc in zip(Wanted_File_List, Wanted_Date_List):\n\t\t\t\t\t\t\n\t\t\t#Import datafile\n\t\t\tif self.High_Grade is True:\n\t\t\t\tChilbolton_Data = self.EPCC_Data.FieldMill_Calibrate(File_Loc, unpack=False, hours2dt=True, 
Data_Level=1)\n\t\t\telse:\n\t\t\t\tChilbolton_Data = self.EPCC_Data.FieldMill_Calibrate(File_Loc, unpack=False, hours2dt=True, Data_Level=0)\n\n\t\t\t#Check Chilbolton_Data exists\n\t\t\tif Chilbolton_Data is None: continue\n\n\t\t\t#Calibrate data and remove out of bounds data\n\t\t\ttry:\n\t\t\t\tChilbolton_Data = Chilbolton_Data[(Chilbolton_Data[:,0] >= self.Date_Start) & (Chilbolton_Data[:,0] < self.Date_End)]\n\t\t\texcept:\n\t\t\t\tprint(\"Chilbolton_Data\", type(Chilbolton_Data))\n\t\t\t\tprint(\"self.Date_Start\", self.Date_Start)\n\t\t\t\tprint(\"self.Date_End\", self.Date_End)\n\t\t\t\tprint(\"Chilbolton_Data[:,0]\", type(Chilbolton_Data[:,0]))\n\t\t\t\n\t\t\tChilbolton_Data_All.update([Chilbolton_Data])\n\t\t\n\t\tChilbolton_Data_All = (Chilbolton_Data_All.finalize(object))\n\t\t\n\t\tif Chilbolton_Data_All.size == 0:\n\t\t\tif self.Force_Plotting is False:\n\t\t\t\tsys.exit(\"[WARNING] No Chilbolton data could be found for the specified dates! Please check date availability using --ping argument!\")\n\t\t\telse:\n\t\t\t\twarnings.warn(\"[WARNING] No Chilbolton data could be found for the specified dates! Please check date availability using --ping argument!\", ImportWarning, stacklevel=2)\n\t\t\t\tif Return_Data is True: return False\n\t\t\t\tChilbolton_Data_All = False\n\t\t\t\t\n\t\tif Chilbolton_Data_All is not False:\n\t\t\tChilbolton_Data_All = Chilbolton_Data_All[Chilbolton_Data_All[:,0].argsort()]\n\t\t\tChilbolton_Data_All = np.concatenate((Chilbolton_Data_All,[[self.Date_End, np.nan]]))\n\t\t\t\t\n\t\tif Return_Data is True: return Chilbolton_Data_All\n\t\t\n\t\t#Plot Data\n\t\tplot_title = 'Chilbolton PG Timeseries between ' + self.Date_Start.strftime('%d/%m/%Y %H:%M:%S') + \" and \" + self.Date_End.strftime('%d/%m/%Y %H:%M:%S')\n\t\tChilbolton_Plot = _SPMods(1, plot_title, np.nan, DateRange=[self.Date_Start, self.Date_End])\n\t\tChilbolton_Plot.Field_Mill(Chilbolton_Data_All[:,0], Chilbolton_Data_All[:,1], Time_Range=[self.Date_Start, self.Date_End], Field_Mill_Type='Chilbolton') if Chilbolton_Data_All is not False else Chilbolton_Plot._PlotError(\"Chilbolton Data Unavailable!\")\n\t\tif self.Save_Plot is True: Chilbolton_Plot._PlotSave(self.Save_Location)\n\t\tif self.Save_Plot is False: Chilbolton_Plot.Show()\n\nclass PG_Report(object):\n\t\"\"\"This class variable will create a simple report which will give details of the \n\tweather conditions, focusing on the potential gradient measurements. The aim for\n\tthe report is to provide a useful guide for launching radiosondes into charged \n\tclouds. Therefore, the information we want to provide that will be useful include:\n\t\n\t1) Descriptive Statistics of PG (min, max, mean, median, sd, range)\n\t2) Step Detection of PG\n\t3) Lightning Detection within PG\n\t4) FFT highlighting very low PG oscillations\n\t5) The cloud type using the diffuse solar radiation measurements\n\t\n\tThe report will also provide a one line statement suggesting whether flying a \n\tradiosonde is a good idea. The statements are categorised by a code system and\n\tthe possible codes are:\n\t\n\tCode 0 : No Cloud or Charge Activity of Interest\n\tCode 1 : Cloud Detected Overhead, but No Charge Activity Detected\n\tCode 2 : Cloud Detected Overhead and Small charge activity detected (< 500 V/m range)\n\tCode 3 : Cloud Detected Overhead and Large charge activity detected (> 500 V/m range)\n\tCode 4 : Lightning Detected. Do Not Fly (Unless Safe to Do So)
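\n\t\n\tExample\n\t-------\n\tA minimal usage sketch (editor's illustration; the dates are hypothetical):\n\t\n\t>>> PG_Report('RUAO', datetime(2017, 1, 11, 9), datetime(2017, 1, 11, 17), Save_Plot=False)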
\n\t\n\t\"\"\"\n\t\n\tdef __init__(self, Location, Date_Start, Date_End, Save_Plot=True, Save_Dir=\"\", File_Name=None, High_Grade=False, Print_Progress=True, Return_Data=False):\n\t\t\n\t\t#TEMP\n\t\tif Location == 'Chilbolton': \n\t\t\t_cprint(\"[INFO] PG_Report isn't supported for Chilbolton in this version :(\", type='warning')\n\t\t\treturn\n\t\t\n\t\t#Specify all available data locations. N.B. Only low grade data is used as a report typically requires up-to-date PG data\n\t\tself.Directories = Directories['Low']\n\t\t\n\t\t#Set-up data importer\n\t\tself.EPCC_Data = _EPCC_Importer()\t\n\t\t\n\t\t#Plotting requirements\n\t\tplt.style.use('classic') #necessary if Matplotlib version is >= 2.0.0\n\t\t\n\t\t#Set-up conditionals\n\t\tself.Save_Location = Save_Dir + 'PG_Report_' + Location + '_' + Date_Start.strftime('%Y%m%d_%H%M%S') + '_to_' + Date_End.strftime('%Y%m%d_%H%M%S') + '.txt' if File_Name is None else Save_Dir + File_Name\n\t\tself.Date_Start = Date_Start\n\t\tself.Date_End = Date_End\n\t\tself.Save_Plot = Save_Plot\n\t\tself.Print_Progress = Print_Progress\n\t\t\n\t\t#Set-up Codes\n\t\tself.Conditions = {0 : \"No Cloud or Charge Activity of Interest\",\n\t\t\t1 : \"Cloud Detected Overhead, but No Charge Activity Detected\",\n\t\t\t2 : \"Cloud Detected Overhead and Small charge activity detected (< 500 V/m range)\",\n\t\t\t3 : \"Cloud Detected Overhead and Large charge activity detected (> 500 V/m range)\",\n\t\t\t4 : \"Lightning Detected. Do Not Fly (Unless Safe to Do So)\"}\n\t\t\n\t\tself.Clouds = {1 : 'Clear',\n\t\t\t2 : 'Overcast',\n\t\t\t3 : 'Stratiform',\n\t\t\t4 : 'Cumuliform',\n\t\t\t5 : 'Unclassified'}\n\t\t\t\n\t\t#Set-up PG_Plotter\n\t\tself.PG_Data = PG_Plotter(\"All\", self.Date_Start, self.Date_End, High_Grade=False, Print_Progress=False, Return_Data=True)\n\t\t\n\t\tif Location == 'RUAO': \n\t\t\tself._RUAO()\n\t\t#elif Location == 'Chilbolton': \n\t\t#\tself._Chilbolton()\n\t\t\t\n\t\treturn\n\t\n\tdef _RUAO(self):\n\t\t\n\t\t#Get RUAO Data\n\t\tRUAO_Data = self.PG_Data._RUAO(Return_Data=True, Expanded_Data=True)\n\t\t\n\t\t#Convert RUAO_Data to individual components\n\t\tRUAO_Time = _dt2hours(RUAO_Data[:,0].astype('datetime64[s]'), RUAO_Data[:,0].astype('datetime64[s]')[0])\n\t\tRUAO_PG = RUAO_Data[:,1].astype(float)\n\t\tRUAO_Sg = RUAO_Data[:,2].astype(float)\n\t\tRUAO_Sd = RUAO_Data[:,3].astype(float)\n\t\t\n\t\t############################################################################\n\t\t\"\"\"Collect All Data for Classification\"\"\"\n\t\t\n\t\t#[Step 1] Calculate Descriptive Statistics\n\t\tRUAO_Stats = _stats(RUAO_PG)\n\t\t\n\t\t#[Step 2] Calculate Step Detection\n\t\tPG_Step_Time, PG_Step_Mag = _Buffer_Tips_TopDown_(RUAO_PG, RUAO_Time, thres='dynamic', abs=True, output=False)\n\t\t\n\t\t#[Step 3] Lightning Detection\n\t\tLightning_Detected = _LightningDetector(RUAO_PG)\n\t\t\n\t\t#[Step 4] FFT of PG\n\t\t\n\t\t#[Step 5] Cloud Type Detection\n\t\tCloud_Time, Cloud_Type = _Cloud_Identifer(RUAO_Data[:,0].astype('datetime64[s]'), RUAO_Sg, RUAO_Sd)\n\t\t\n\t\t############################################################################\n\t\t\"\"\"Classify\"\"\"\n\t\t\n\t\t#Classify the cloud type and charge\n\t\tif stats.mode(Cloud_Type)[0][0] > 1:\n\t\t\tif RUAO_Stats['range'] > 75:\n\t\t\t\tif PG_Step_Mag.size > 1: #Step Detection Detected Charge\n\t\t\t\t\tif RUAO_Stats['range'] > 500:\n\t\t\t\t\t\tif Lightning_Detected > 1:\n\t\t\t\t\t\t\tSelected_Code = 4\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tSelected_Code = 
3\n\t\t\t\t\telse:\n\t\t\t\t\t\tSelected_Code = 2\n\t\t\t\telse:\n\t\t\t\t\tSelected_Code = 1\n\t\t\telse:\n\t\t\t\tSelected_Code = 1\n\t\telse:\n\t\t\tSelected_Code = 0\n\t\t\n\t\t#Save information to file\n\t\tif self.Save_Plot is True:\n\t\t\twith open(self.Save_Location, 'w') as f:\n\t\t\t\tf.write('Date/Time: %s UTC\\r\\n' % (RUAO_Data[:,0].astype('datetime64[s]').astype(datetime)[-1].strftime(\"%Y/%m/%d %H:%M:%S\")))\n\t\t\t\tf.write('Location: RUAO\\r\\n')\n\t\t\t\tf.write('Cloud Type: %s\\r\\n\\r\\n' % (self.Clouds[stats.mode(Cloud_Type)[0][0]]))\n\t\t\t\tf.write('Statistics\\r\\n----------\\r\\n')\n\t\t\t\tf.write(\"Min = %.2f V/m\\r\\n\" % RUAO_Stats['min'])\n\t\t\t\tf.write(\"Max = %.2f V/m\\r\\n\" % RUAO_Stats['max'])\n\t\t\t\tf.write(\"Mean = %.2f V/m\\r\\n\" % RUAO_Stats['mean'])\n\t\t\t\tf.write(\"Median = %.2f V/m\\r\\n\" % RUAO_Stats['median'])\n\t\t\t\tf.write(\"Std. = %.2f V/m\\r\\n\" % RUAO_Stats['sd'])\n\t\t\t\tf.write(\"Range (Max-Min) = %.2f V/m\\r\\n\" % RUAO_Stats['range'])\n\t\t\t\tf.write(\"Noise (Median/Std) = %.2f V/m\\r\\n\" % RUAO_Stats['noise'])\n\t\t\t\tf.write(\"75%% Interquartile Range = %.2f V/m\\r\\n\" % RUAO_Stats['iqr_75'])\n\t\t\t\tf.write(\"90%% Interquartile Range = %.2f V/m\\r\\n\\r\\n\" % RUAO_Stats['iqr_90'])\n\t\t\t\tf.write(\"PG Variability\\r\\n-------------\\r\\n\")\n\t\t\t\tf.write(\"Number of Step Detection: %s\\r\\n\" % PG_Step_Mag.size)\n\t\t\t\tf.write(\"Largest Step Detection: %.0f V/m/s\\r\\n\" % np.nanmax(PG_Step_Mag) if PG_Step_Mag.size > 0 else \"Largest Step Detection: None\\r\\n\")\n\t\t\t\tf.write(\"Number of Lightning Strikes Detected: %s\\r\\n\\r\\n\" % Lightning_Detected)\n\t\t\t\tf.write(\"Report Summary\\r\\n---------------\\r\\n\")\n\t\t\t\tf.write(self.Conditions[Selected_Code])\n\t\telse:\n\t\t\t_cprint(\"PG Report at RUAO\", type='bold')\n\t\t\tprint(\"-----------------\")\n\t\t\tprint('Date/Time: %s UTC' % (RUAO_Data[:,0].astype('datetime64[s]').astype(datetime)[-1].strftime(\"%Y/%m/%d %H:%M:%S\")))\n\t\t\tprint('Location: RUAO')\n\t\t\tprint('Cloud Type: %s\\r\\n' % (self.Clouds[stats.mode(Cloud_Type)[0][0]]))\n\t\t\t_cprint('Statistics\\r\\n----------', type='bold')\n\t\t\tprint(\"Min = %.2f V/m\" % RUAO_Stats['min'])\n\t\t\tprint(\"Max = %.2f V/m\" % RUAO_Stats['max'])\n\t\t\tprint(\"Mean = %.2f V/m\" % RUAO_Stats['mean'])\n\t\t\tprint(\"Median = %.2f V/m\" % RUAO_Stats['median'])\n\t\t\tprint(\"Std. 
= %.2f V/m\" % RUAO_Stats['sd'])\n\t\t\tprint(\"Range (Max-Min) = %.2f V/m\" % RUAO_Stats['range'])\n\t\t\tprint(\"Noise (Median/Std) = %.2f V/m\" % RUAO_Stats['noise'])\n\t\t\tprint(\"75%% Interquartile Range = %.2f V/m\" % RUAO_Stats['iqr_75'])\n\t\t\tprint(\"90%% Interquartile Range = %.2f V/m\\r\\n\" % RUAO_Stats['iqr_90'])\n\t\t\t_cprint(\"PG Variability\\r\\n-------------\", type='bold')\n\t\t\tprint(\"Number of Step Detection: %s\" % PG_Step_Mag.size)\n\t\t\tprint(\"Largest Step Detection: %.0f V/m/s\" % np.nanmax(PG_Step_Mag) if PG_Step_Mag.size > 0 else \"Largest Step Detection: None\")\n\t\t\tprint(\"Number of Lightning Strikes Detected: %s\\r\\n\" % Lightning_Detected)\n\t\t\t_cprint(\"Report Summary\\r\\n---------------\", type='bold')\n\t\t\t_cprint(self.Conditions[Selected_Code], type='warning')\n\t\t\n\t\treturn\n\t\t\n\tdef _Chilbolton(self):\n\t\n\t\tsys.path.insert(0, '../WC2_Surface_Electrification/Python_Code/Prerequisites/modules')\n\t\timport pysftp\n\t\n\t\t#Download Data\n\t\tsave_location_Dis \t= \"/glusterfs/phd/users/th863480/WC2_Surface_Electrification/Raw_Data/Met_Data/Incoming/Disdrometer/\"\n\t\tsave_location_lidar\t= \"/glusterfs/phd/users/th863480/WC2_Surface_Electrification/Raw_Data/Lidar/Incoming/\"\n\t\tsave_location_35GHz\t= \"/glusterfs/phd/users/th863480/WC2_Surface_Electrification/Raw_Data/Radar/Incoming/35GHz/\"\n\t\t\n\t\tChobs_username = 'jgilmore'\n\t\tChobs_password = 'Gilly1992@4815162342'\n\t\t\n\t\tdate_from = self.Date_Start.date()\n\t\t\n\t\tif Chobs_Access is True:\n\t\t\twith pysftp.Connection('gate.chobs.rl.ac.uk', username=Chobs_username, password=Chobs_password) as sftp:\n\t\t\t\n\t\t\t\t###[13] 35GHz Copernicus Data ###\n\t\t\t\tprint(\"[Step 13]: Downloading 35GHz Copernicus Data\")\n\t\t\t\ttry:\n\t\t\t\t\tsftp.cwd('/radar/radar-copernicus/processed/')\n\t\t\t\t\tdates_list = Wilma_Bulk_Downloader_Radar(sftp, save_location_35GHz, date_from=date_from)\n\t\t\t\texcept:\n\t\t\t\t\tprint('[PG_Quickplotter]: Error Downloading 35GHz Data')\n\t\t\t\t\tError_Log[8] = 1\n\t\t\t\t\t\t\n\t\t\t\t###[16] DISDROMETER ###\n\t\t\t\tprint(\"[Step 16]: Downloading Disdrometer Data\")\n\t\t\t\ttry:\n\t\t\t\t\tsftp.cwd('/data/netCDF/files/cfarr-disdrometer_chilbolton')\n\t\t\t\t\tdates_list = Wilma_Bulk_Downloader_Disdrom(sftp, save_location_Dis, date_from=date_from)\n\t\t\t\texcept:\n\t\t\t\t\tprint('[PG_Quickplotter]: Error Downloading Disdrometer Data')\n\t\t\t\t\n\t\t\t\t###[17] LIDAR ###\n\t\t\t\tprint(\"[Step 17]: Downloading Lidar Data\")\n\t\t\t\ttry:\n\t\t\t\t\tsftp.cwd('/home/eoconnor/uncalibrated/cl51/')\n\t\t\t\t\tdates_list = Wilma_Bulk_Downloader_Lidar(sftp, save_location_lidar, date_from=date_from)\n\t\t\t\texcept:\n\t\t\t\t\tprint('[PG_Quickplotter]: Error Downloading Lidar Data')\n\n\t\treturn\n\t\nclass PG_Ping(object):\n\t\"\"\"This class function will determine if any potential gradient (PG) data is available \n\tfor plotting at the desired locations. 
Currently the locations are:\n\t\n\t1) Reading University Atmospheric Observatory (RUAO)\n\t2) Chilbolton Observatory\n\t\n\tThe date ranges for what can be plotted are also given as a printout to the console\"\"\"\n\t\n\tdef __init__(self):\n\n\t\t#Specify all available data locations\n\t\tself.Directories = {'RUAO_LowGrade' : Directories['Low']['RUAO'],\n\t\t\t'RUAO_HighGrade' : Directories['High']['RUAO'],\n\t\t\t'Chilbolton_LowGrade' : Directories['Low']['Chilbolton'],\n\t\t\t'Chilbolton_HighGrade' : Directories['High']['Chilbolton']}\n\t\n\t\t#Determine if low grade and high grade data is available for each location\n\t\tRUAO_Avail_LowGrade, RUAO_Avail_HighGrade, RUAO_Range_LowGrade, RUAO_Range_HighGrade = self._RUAO()\n\t\tChil_Avail_LowGrade, Chil_Avail_HighGrade, Chil_Range_LowGrade, Chil_Range_HighGrade = self._Chilbolton()\n\t\t\n\t\t#Print report to console\n\t\t_cprint(\"Current PG Availability Report\", type='bold')\n\t\tprint(\"------------------------------\\n\")\n\t\t_cprint(\"Location | Available (LG)? Available (HG)? Date Start (LG) Date End (LG) Date Start (HG) Date End (HG)\", type='underline')\n\t\tprint(\"RUAO\", \" | \", RUAO_Avail_LowGrade, \" \", RUAO_Avail_HighGrade, \" \", RUAO_Range_LowGrade[0], \" \", RUAO_Range_LowGrade[1], \" \", RUAO_Range_HighGrade[0], \" \", RUAO_Range_HighGrade[1])\n\t\tprint(\"Chilbolton\", \"| \t \", Chil_Avail_LowGrade, \" \", Chil_Avail_HighGrade, \" \", Chil_Range_LowGrade[0], \" \", Chil_Range_LowGrade[1], \" \", Chil_Range_HighGrade[0], \" \", Chil_Range_HighGrade[1])\n\t\tprint(\"\\n----------------------------------------------------\")\n\t\t_cprint(\"N.B. LG = Low Grade PG Data, HG = High Grade PG Data\", type=\"warning\")\n\t\tprint(\"----------------------------------------------------\")\n\t\t\n\t\t#Force exit\n\t\tsys.exit()\n\t\n\tdef _RUAO(self):\n\t\n\t\t#Import file list\n\t\tfilelist_highgrade = sorted(glob.glob(self.Directories['RUAO_HighGrade'] + '*.nc'))\n\t\t\n\t\tfilelist_lowgrade = []\n\t\tfor root, dirnames, filenames in os.walk(self.Directories['RUAO_LowGrade']):\n\t\t\t# for filename in fnmatch.filter(filenames, '*Smp1Sec.csv.gz'):\n\t\t\t\t# filelist_lowgrade.append(os.path.join(root, filename))\n\t\t\t\t\n\t\t\t# for filename in fnmatch.filter(filenames, '*Smp1Sec.csv'):\n\t\t\t\t# filelist_lowgrade.append(os.path.join(root, filename))\n\t\t\t\t\n\t\t\tfor filename in fnmatch.filter(filenames, '*SMP1*.csv'):\n\t\t\t\tfilelist_lowgrade.append(os.path.join(root, filename))\t\n\t\t\t\t\n\t\tfilelist_lowgrade = sorted(filelist_lowgrade)\n\t\t\t\t\n\t\t#Determine dates for each file\n\t\tdatelist_highgrade = np.zeros(len(filelist_highgrade), dtype=object)\n\t\tfor i in xrange(len(filelist_highgrade)): \n\t\t\tdatelist_highgrade[i] = datetime.strptime(os.path.basename(filelist_highgrade[i]), 'RUAO_PGRR_Data_1Hz_%Y%m%d.nc')\n\t\t\n\t\tdatelist_lowgrade = np.zeros(len(filelist_lowgrade), dtype=object)\n\t\tfor i in xrange(len(filelist_lowgrade)): \n\t\t\ttry:\n\t\t\t\t#datelist_lowgrade[i] = datetime.strptime(os.path.basename(filelist_lowgrade[i])[:10], '%Y-%m-%d')\n\t\t\t\tdatelist_lowgrade[i] = datetime.strptime(os.path.basename(filelist_lowgrade[i]), '%Y-SMP1-%j.csv')\n\t\t\texcept:\n\t\t\t\tdatelist_lowgrade[i] = datetime(1900,1,1)\n\t\n\t\t#Test availability for High Grade PG data at RUAO\n\t\tif len(filelist_highgrade) != 0: \n\t\t\tdata_available_highgrade = True\n\t\t\tdata_range_highgrade = [datelist_highgrade[0].strftime('%Y/%m/%d'), 
datelist_highgrade[-1].strftime('%Y/%m/%d')]\n\t\telse:\n\t\t\tdata_available_highgrade = False\n\t\t\tdata_range_highgrade = [np.nan, np.nan]\n\t\t\t\n\t\t#Test availability for Low Grade PG data at RUAO\n\t\tif len(filelist_lowgrade) != 0: \n\t\t\tdata_available_lowgrade = True\n\t\t\tdata_range_lowgrade = [datelist_lowgrade[0].strftime('%Y/%m/%d'), datelist_lowgrade[-1].strftime('%Y/%m/%d')]\n\t\telse:\n\t\t\tdata_available_lowgrade = False\n\t\t\tdata_range_lowgrade = [np.nan, np.nan]\n\t\t\t\n\t\treturn data_available_lowgrade, data_available_highgrade, data_range_lowgrade, data_range_highgrade\n\t\n\tdef _Chilbolton(self):\n\t\n\t\t#Import file list\n\t\tfilelist_lowgrade = sorted(glob.glob(self.Directories['Chilbolton_LowGrade'] + '*1Sec.csv'))\n\t\tfilelist_highgrade = sorted(glob.glob(self.Directories['Chilbolton_HighGrade'] + '*.nc'))\n\t\t\n\t\t#Determine dates for each file\n\t\tdatelist_lowgrade = np.zeros(len(filelist_lowgrade), dtype=object)\n\t\tfor i in xrange(len(filelist_lowgrade)): \n\t\t\ttry:\n\t\t\t\tdatelist_lowgrade[i] = datetime.strptime(os.path.basename(filelist_lowgrade[i]), \"%Y-%m-%d-PG1Sec.csv\")\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\tdatelist_lowgrade[i] = datetime.strptime(os.path.basename(filelist_lowgrade[i]), \"%Y-%m-%d_PG1Sec.csv\")\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"ERROR\", os.path.basename(filelist_lowgrade[i]))\n\t\t\t\t\tdatelist_lowgrade[i] = datetime(1900,1,1)\n\t\t\t\t\tcontinue\n\t\t\t\n\t\tdatelist_lowgrade = np.sort(datelist_lowgrade, kind='mergesort')\n\t\tdatelist_lowgrade = datelist_lowgrade[datelist_lowgrade > datetime(1900,1,1)]\n\t\t\n\t\tdatelist_highgrade = np.zeros(len(filelist_highgrade), dtype=object)\n\t\tfor i in xrange(len(filelist_highgrade)): \n\t\t\ttry:\n\t\t\t\tdatelist_highgrade[i] = datetime.strptime(os.path.basename(filelist_highgrade[i]), \"Field_Mill_PG_Chilbolton_1sec_%Y%m%d.nc\")\n\t\t\texcept:\n\t\t\t\tdatelist_highgrade[i] = datetime(1900,1,1)\n\t\t\t\t\n\t\tdatelist_highgrade = np.sort(datelist_highgrade, kind='mergesort')\n\t\tdatelist_highgrade = datelist_highgrade[datelist_highgrade > datetime(1900,1,1)]\n\t\t\n\t\t#Test availability for Low Grade PG data at Chilbolton\t\n\t\tif len(filelist_lowgrade) != 0: \n\t\t\tdata_available_lowgrade = True\n\t\t\tdata_range_lowgrade = [datelist_lowgrade[0].strftime('%Y/%m/%d'), datelist_lowgrade[-1].strftime('%Y/%m/%d')]\n\t\telse:\n\t\t\tdata_available_lowgrade = False\n\t\t\tdata_range_lowgrade = [np.nan, np.nan]\t\t\n\t\t\t\n\t\t#Test availability for High Grade PG data at Chilbolton\t\n\t\tif len(filelist_highgrade) != 0: \n\t\t\tdata_available_highgrade = True\n\t\t\tdata_range_highgrade = [datelist_highgrade[0].strftime('%Y/%m/%d'), datelist_highgrade[-1].strftime('%Y/%m/%d')]\n\t\telse:\n\t\t\tdata_available_highgrade = False\n\t\t\tdata_range_highgrade = [np.nan, np.nan]\n\t\t\t\n\t\treturn data_available_lowgrade, data_available_highgrade, data_range_lowgrade, data_range_highgrade\n\nclass _totalarray:\n\t\"\"\"https://stackoverflow.com/questions/7133885/fastest-way-to-grow-a-numpy-numeric-array\"\"\"\n\t\n\tdef __init__(self):\n\t\tself.data = []\n\t\tself.flatten = lambda l: [item for sublist in l for item in sublist]\n\t\t\n\tdef update(self, row):\n\t\tfor r in row:\n\t\t\tself.data.append(r)\n\n\tdef finalize(self, dtype=float):\n\t\treturn np.array(self.flatten(self.data), dtype=dtype)
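\n\n#A minimal usage sketch of _totalarray (editor's illustration; the values are hypothetical):\n#\n#\tacc = _totalarray()\n#\tacc.update([np.array([[1.0, 2.0], [3.0, 4.0]])])\t#append one block of rows\n#\tacc.update([np.array([[5.0, 6.0]])])\t\t\t\t#append another block\n#\tcombined = acc.finalize(float)\t\t\t\t\t\t#-> 3x2 array of all rows\n\t\t\nclass _SPMods(object):\n\t\"\"\"This class object will 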
be used to define all the code for each plotting\n\ttype that we can plot. This includes the methods on how to plot the radar\n\treflectivity, rain gauges and potential gradient. Then we can choose in the\n\tmain modules (e.g. Daily and Zoomed) which order and what data to plot\n\twithout the need to move, shift and renumber lots of code constantly.\"\"\"\n\n\tflatten = lambda self, l: [item for sublist in l for item in sublist]\n\t\n\tdef __init__(self, numplots, plot_title, y_upper_lim=14, Super_Time_Median=None, TimeRange=None, DateRange=None):\n\t\t\"\"\"Initialise the plot\"\"\"\n\t\t\n\t\tplt.clf()\n\t\tplt.close()\n\t\tself.f, self.ax = plt.subplots(numplots, sharex=True)\n\t\tif numplots == 1: self.ax = np.array([self.ax])\n\t\tself.ylim = y_upper_lim\n\t\tself.ax[0].set_title(plot_title)\n\t\tfor subplot in self.ax: subplot.minorticks_on()\n\t\t\n\t\tself.colorbarpos = [0, 0, 0, 0, 0.020, -0.005]\n\t\tself.DateRange = DateRange\n\t\t\n\t\tself.f.subplots_adjust(hspace=0)\n\t\tplt.setp([a.get_xticklabels() for a in self.f.axes[:-1]], visible=False)\n\t\t\t\t\n\t\t#Define plot size\n\t\tfig=plt.gcf()\n\t\tfig.set_size_inches(8.3, (11.7/6)*self.ax.size) #A4 Size\n\t\t\n\t\t#Set subplot number\n\t\tself.spnum = 0\n\t\t\n\tdef _PlotError(self, msg):\n\t\t\"\"\"Used to output an error message to the plot\"\"\"\n\t\n\t\tErrorCode = True\n\t\t\n\t\t#Build a rectangle in axes coordinates\n\t\tleft, width = .25, .5\n\t\tbottom, height = .25, .5\n\t\tright = left + width\n\t\ttop = bottom + height\n\n\t\t#Add text to centre of subplot\n\t\tself.ax[self.spnum].text(0.5*(left + right), 0.5*(bottom + top), msg,\n\t\t\thorizontalalignment='center',\n\t\t\tverticalalignment='center',\n\t\t\ttransform=self.ax[self.spnum].transAxes, \n\t\t\talpha=0.5, fontsize=20, color='red')\n\t\t\n\t\tself.spnum += 1\t\n\t\t\n\t\treturn ErrorCode\n\t\n\tdef _PlotNumCheck(self):\n\t\t\"\"\"Checks to see if the number of called plots reaches the limit\"\"\"\n\t\t\n\t\tif len(self.ax) == self.spnum: \n\t\t\twarnings.warn(\"Can't fit all plots into superplots, a limit of %d plots has been set. The following plot call caused this warning: \" % (len(self.ax)), SyntaxWarning, stacklevel=3)\n\t\t\treturn False\n\t\n\tdef _FixXdates(self):\n\t\t\"\"\"Fixes the labelling issue for the x axis\"\"\"\n\t\t\n\t\tif self.DateRange is not None: #Long time ranges\n\t\t\t\n\t\t\tTimeLength = (self.DateRange[1] - self.DateRange[0]).total_seconds() + 1\n\t\t\tif TimeLength/86400 <= 2:\n\t\t\t\t\"\"\"Short Range: ~Single Day\"\"\"\n\t\t\t\tmyFmt = DateFormatter('%H:%M')\n\t\t\t\tself.ax[-1].xaxis.set_major_formatter(myFmt)\n\t\t\t\tself.ax[-1].xaxis.set_major_locator(MinuteLocator(interval=int(np.floor((TimeLength/60)/6))))\n\t\t\telif TimeLength/86400 <= 7:\n\t\t\t\t\"\"\"Medium Range: ~Multiple Days\"\"\"\n\t\t\t\tmyFmt = DateFormatter('%Y-%m-%d %H:%M') #Use this when plotting multiple days (e.g. monthly summary)\n\t\t\t\tself.ax[-1].xaxis.set_major_formatter(myFmt)\n\t\t\t\tself.ax[-1].xaxis.set_major_locator(HourLocator(interval=int(round((TimeLength/3600)/6))))\n\t\t\telse:\n\t\t\t\t\"\"\"Long Range: ~Months\"\"\"\n\t\t\t\tmyFmt = DateFormatter('%Y-%m-%d') #Use this when plotting multiple days (e.g. 
monthly summary)\n\t\t\t\tself.ax[-1].xaxis.set_major_formatter(myFmt)\n\t\t\t\tself.ax[-1].xaxis.set_major_locator(DayLocator(interval=int(round((TimeLength/86400)/6))))\n\t\t\tself.ax[-1].set_xlabel('Time (UTC) between ' + self.DateRange[0].strftime('%d/%m/%Y %H:%M:%S') + \" and \" + self.DateRange[1].strftime('%d/%m/%Y %H:%M:%S'))\n\t\t\t\n\t\telse:\n\t\t\tay=plt.gca()\n\t\t\tay.xaxis.set_major_locator(MultipleLocator(4))\n\t\t\n\t\t[tick.set_rotation(90) for subplot in self.ax for tick in subplot.get_xticklabels()]\n\t\t\n\tdef _PlotSave(self, Location, bbox_inches='tight', pad_inches=0.1, dpi=300):\n\t\t\"\"\"Saves plots specified by Location and closes all plots to reduce the memory usage\"\"\"\n\t\t\n\t\t#Fix date labelling on x axis\n\t\tself._FixXdates()\t\t\t\n\t\t\n\t\tplt.savefig(Location, bbox_inches=bbox_inches, pad_inches=pad_inches, dpi=dpi)\t\t\n\t\tplt.close()\n\t\tplt.clf()\n\t\n\tdef Show(self):\n\t\t\"\"\"Shows the plot on screen\"\"\"\n\t\t\n\t\t#Fix date labelling on x axis\n\t\tself._FixXdates()\n\t\t\t\n\t\ttry:\n\t\t\tplt.show()\n\t\texcept:\n\t\t\tprint(\"[Error]: Sorry but we can't display the plot to screen\")\n\t\t\n\tdef Field_Mill(self, Field_Mill_Time, Field_Mill_PG, Time_Range=[0,24], Field_Mill_Type='Relative', PG_Step_Time=None, PG_Step_Mode=None, Step_Colour_Type='BW', PG_Modelled=None, PG_Measured=None, PG_Stats=None, Field_Mill_Range=None, Fixed_Range=None, Overlap_Ticks=False):\n\t\t\n\t\tif self._PlotNumCheck() == False: return\n\t\n\t\tif len(self.ax) == self.spnum: return \n\t\t\n\t\tif Field_Mill_Time is not None:\n\t\t\n\t\t\t#Alter plot scales based on conditionals\n\t\t\tif Field_Mill_Type == 'Fixed' and Fixed_Range is None: self.ax[self.spnum].set_ylim([-1000,1000])\n\t\t\tif Fixed_Range is not None: self.ax[self.spnum].set_ylim(Fixed_Range)\n\t\t\tif Field_Mill_Range is not None: Field_Mill_Time = Field_Mill_Time[Field_Mill_Range]\n\t\t\tif Field_Mill_Range is not None: Field_Mill_PG = Field_Mill_PG[Field_Mill_Range]\n\t\t\t\n\t\t\t#Plot the field mill data\n\t\t\tself.ax[self.spnum].plot(Field_Mill_Time, Field_Mill_PG, lw=0.5)\n\t\t\tif PG_Modelled is not None: self.ax[self.spnum].plot(PG_Modelled[:,0], PG_Modelled[:,1], lw=0.5, c='black')\n\t\t\tself.ax[self.spnum].set_ylabel('PG (V/m)')\n\t\t\t\n\t\t\t#Remove overlapping ticks from subplot\n\t\t\tif Overlap_Ticks is False:\n\t\t\t\ttry:\n\t\t\t\t\tself.ax[self.spnum].yaxis.set_major_locator(MaxNLocator(nbins=len(self.ax[self.spnum].get_xticklabels()), prune='upper'))\n\t\t\t\texcept ValueError:\n\t\t\t\t\tpass\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t#Plot the step detection overlay\n\t\t\tif PG_Step_Time is not None:\n\t\t\t\tif np.sum(np.isnan(PG_Step_Time)) != 1:\n\t\t\t\t\tif Step_Colour_Type == 'RGB':\n\t\t\t\t\t\tfor k in xrange(len(PG_Step_Time)):\t\t\t\n\t\t\t\t\t\t\tself.ax[self.spnum].axvspan(PG_Step_Time[k]-1/3600,PG_Step_Time[k]+1/3600, facecolor=Step_Colours[Step_Mod2Col[PG_Step_Mode[k]]], alpha=0.3)\n\t\t\t\t\telif Step_Colour_Type == 'BW':\n\t\t\t\t\t\tfor k in xrange(len(PG_Step_Time)):\t\t\t\n\t\t\t\t\t\t\tself.ax[self.spnum].axvspan(PG_Step_Time[k]-1/3600,PG_Step_Time[k]+1/3600, facecolor='Black', alpha=(0.5/6)*Step_Mod2Col[PG_Step_Mode[k]]+0.1, **fill_kwargs)\n\t\t\t\t\t\t\n\t\t\t#Old Version: Removed Step_Range. 
You include them when calling this function\n\t\t\t# if PG_Step_Time is not None:\n\t\t\t\t# if np.sum(np.isnan(PG_Step_Time)) != 1:\n\t\t\t\t\t# if Step_Colour_Type == 'RGB':\n\t\t\t\t\t\t# for k in xrange(len(PG_Step_Time[Step_Range])):\t\t\t\n\t\t\t\t\t\t\t# self.ax[self.spnum].axvspan(PG_Step_Time[Step_Range][k]-1/3600,PG_Step_Time[Step_Range][k]+1/3600, facecolor=Step_Colours[Step_Mod2Col[PG_Step_Mode[Step_Range][k]]], alpha=0.3)\n\t\t\t\t\t# elif Step_Colour_Type == 'BW':\n\t\t\t\t\t\t# for k in xrange(len(PG_Step_Time[Step_Range])):\t\t\t\n\t\t\t\t\t\t\t# self.ax[self.spnum].axvspan(PG_Step_Time[Step_Range][k]-1/3600,PG_Step_Time[Step_Range][k]+1/3600, facecolor='Black', alpha=(0.5/6)*Step_Mod2Col[PG_Step_Mode[Step_Range][k]]+0.1, **fill_kwargs)\n\t\t\t\n\t\telse:\n\t\t\tErrorCode = self._PlotError('Missing Field Mill File')\n\t\t\n\t\t#Set the width of the plot\n\t\ttry:\n\t\t\tself.ax[self.spnum].set_xlim(Time_Range[0], Time_Range[-1])\n\t\texcept:\n\t\t\tprint(\"[Error] Setting Superplot Time Range Failed!\")\n\t\t\tself.ax[self.spnum].set_xlim([0, 24])\n\t\t\t\t\n\t\t#Add dashed grey grid to plot\n\t\tself.ax[self.spnum].grid(which='major',axis='both',c='grey')\n\t\t\n\t\t#Add annotations to plot\n\t\tif Field_Mill_Type == 'Fixed':\n\t\t\tself.ax[self.spnum].annotate(\"Potential Gradient (Fixed Scale)\", xy=(0, 1), xycoords='axes fraction', xytext=(20, -20), textcoords='offset pixels', horizontalalignment='left', verticalalignment='top', fontsize=10)\n\t\telif Field_Mill_Type == 'Relative':\n\t\t\tself.ax[self.spnum].annotate(\"Potential Gradient (Relative Scale)\", xy=(0, 1), xycoords='axes fraction', xytext=(20, -20), textcoords='offset pixels', horizontalalignment='left', verticalalignment='top', fontsize=10)\n\t\telse:\n\t\t\tself.ax[self.spnum].annotate(Field_Mill_Type, xy=(0, 1), xycoords='axes fraction', xytext=(20, -20), textcoords='offset pixels', horizontalalignment='left', verticalalignment='top', fontsize=10)\n\t\t\n\t\t#Add statistics of point charge modelling if available\n\t\tif PG_Modelled is not None:\n\t\t\tr_value, p_value, std_err = PG_Stats\n\t\t\tself.ax[self.spnum].annotate(\"$R^{2}$ = %.4f \\n$P-Value$ = %.4f \\n$SE$ = %.4f\" % (r_value, p_value, std_err), xy=(1, 1), xycoords='axes fraction', fontsize=10, xytext=(-3, -3), textcoords='offset points', ha='right', va='top')\n\t\t\t\t\n\t\t#Update self.spnum\n\t\tself.spnum += 1\n\t\t\n\t\treturn\n\nclass _EPCC_Importer(object):\n\t\"\"\"This class variable groups together all of the importing tasks required\n\tfor the Chilbolton dataset. Many of these variables could be merged together,\n\tspecifically the files using netCDF as you could just specify the file loc. and\n\tvariable name when required, but this has not been done for readability's sake.\"\"\"\n\n\tdef __init__(self):\n\t\tself.QC = _EPCC_QC()\n\t\n\tdef FieldMill_Calibrate(self, PGFile, Data_Level=1, hours2dt=False, unpack=True):\n\t\t\"\"\"Imports and calibrates data from the CR1000 field mill.\n\t\t\n\t\tParameters\n\t\t----------\n\t\tPGFile : str\n\t\t\tThe file location of the PG data you want to import\n\t\tPGFile_Date : datetime\n\t\t\tN.B. PGFile_Date is a redundant parameter but is kept for cross \n\t\t\tcompatibility\n\t\tData_Level : int, optional\n\t\t\tUsed to specify which data level you are importing.\n\t\t\t\n\t\t\tData_Level == 0 : RAW data (e.g. CSV files)\n\t\t\tData_Level == 1 : Processed data (e.g. netCDF files)
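\n\t\t\t\n\t\tExample\n\t\t-------\n\t\tA minimal usage sketch (editor's illustration; the file name is hypothetical):\n\t\t\n\t\t>>> Time, PG = _EPCC_Importer().FieldMill_Calibrate('Field_Mill_PG_Chilbolton_1sec_20170111.nc')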
\n\t\t\t\n\t\t\"\"\"\n\t\t\n\t\tif Data_Level == 1: #For Processed Data: This is default\n\t\t\ttry:\n\t\t\t\tif PGFile != np.string_(0):\n\t\t\t\t\tPG_Data = NetCDFFile(PGFile, 'r')\n\t\t\texcept IOError:\n\t\t\t\treturn (None, None) if unpack is True else None\n\t\t\t\n\t\t\tTime = PG_Data.variables['time'][:]\n\t\t\tPG = PG_Data.variables['pg'][:]\n\n\t\t\tif hours2dt is True:\n\t\t\t\tif not PG_Data.variables['time'].units == 'hr':\n\t\t\t\t\tEpoch_Date = datetime.strptime(PG_Data.variables['time'].units, \"hours since %Y-%m-%d 00:00:00.0\")\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tEpoch_Date = datetime.strptime(os.path.basename(PGFile), \"Field_Mill_PG_Chilbolton_1sec_%Y%m%d.nc\")\n\t\t\t\tTime = _hours2dt(Time, Epoch_Date, dtype='datetime64')\n\t\n\t\t\t\t#Remove any None values inside Time array\n\t\t\t\tTime[np.equal(Time, None)] = np.datetime64(\"1900-01-01\")\n\t\t\t\t\n\t\t\tPG_Data.close()\n\t\t\t\n\t\t\t\n\t\t\tif unpack is True:\n\t\t\t\treturn Time, PG\n\t\t\telse:\n\t\t\t\treturn np.array([Time, PG]).T\n\t\t\t\t\t\n\t\telif Data_Level == 0:\n\t\t\t\"\"\"Import the RAW PG dataset\"\"\"\n\t\t\t\n\t\t\ttry:\n\t\t\t\t#Import data to panda data frame (Quickest import method for CSV files)\n\t\t\t\tRaw_PG_Data = pd.read_csv(PGFile, sep=',', skiprows=4, header=None, names=('time', 'count', 'pg'), parse_dates=['time'], date_parser = pd.to_datetime, dtype={'time': np.str, 'count': np.int32, 'pg': np.float64}, na_values='NAN')\n\n\t\t\t\tTime = np.array(Raw_PG_Data['time'], dtype='datetime64[us]')\n\t\t\t\n\t\t\t\tepoch = Time[0]\n\t\t\t\tHour_Frac = _dt2hours(Time, epoch)\n\t\t\t\tTime, PG = self.QC.FieldMill_QC(Time, Hour_Frac, Raw_PG_Data['pg'])\n\t\t\t\tTime = _hours2dt(Time, epoch)\n\t\t\t\n\t\t\t\treturn np.array([Time, PG]).T\n\t\t\t\n\t\t\texcept:\n\t\t\t\treturn None\n\n\t\n\tdef RUAO_Calibrate(self, RUAOFile, Col=(0,1), NC_File=False, unpack=False, Expanded_Data=False):\n\t\t\"\"\"Import columns from the RUAO dataset\"\"\"\n\t\t\n\t\tif NC_File is False:\n\t\t\t#Need to get columns first as there are rows to skip after header!\n\t\t\tcolumns = pd.read_csv(RUAOFile, sep=',', header=0, low_memory=False).columns\n\t\t\t\n\t\t\tRUAOData = pd.read_csv(RUAOFile, sep=',', skiprows=4, names=columns,\n\t\t\t\tparse_dates=['TimeStamp'], date_parser = lambda d: pd.to_datetime(d, format=\"%d/%m/%Y %H:%M:%S\"),\n\t\t\t\tna_values='nan', low_memory=False).to_records(index=False)\n\t\t\t\n\t\t\tif len(Col) == 2:\n\t\t\t\n\t\t\t\treturn np.vstack((RUAOData['TimeStamp'].astype('datetime64[s]').astype(datetime), RUAOData['PG'])).T\n\t\t\t\t\n\t\t\telse:\n\t\t\t\t\n\t\t\t\treturn np.vstack((RUAOData['TimeStamp'].astype('datetime64[s]').astype(datetime), RUAOData['PG'], RUAOData['Sg'], RUAOData['Sd'])).T\n\t\t\t\n\t\t#USED FOR METFiDAS-Incoming/Level0\n\t\t# if NC_File is False:\n\t\t\t\n\t\t\t# try:\n\t\t\t\t# RUAOData = np.loadtxt(RUAOFile, dtype=str, delimiter=',', usecols=Col, unpack=False, skiprows=4)\n\t\t\t# except IOError:\n\t\t\t\t# if unpack == True:\n\t\t\t\t\t# return np.split(np.zeros(len(Col)), len(Col))\n\t\t\t\t# else:\n\t\t\t\t\t# return None\n\t\t\t\n\t\t\t# #Quality control data\n\t\t\t# try:\n\t\t\t\t# RUAOData = self.QC.RUAO_QC(RUAOData, Col)\n\t\t\t# except IndexError:\n\t\t\t\t# if unpack == True:\n\t\t\t\t\t# return np.split(np.zeros(len(Col)), len(Col))\n\t\t\t\t# else:\n\t\t\t\t\t# return None\n\t\t\t\n\t\t\t# if unpack == True:\n\t\t\t\t# return [RUAOData[:,i] for i in xrange(RUAOData.shape[1])]\n\t\t\t# else:\n\t\t\t\t# return RUAOData\n\n\t\telse:\n\t\t\t\"\"\"For importing PROCESSED daily 
RUAO files\"\"\"\n\t\t\t\t\n\t\t\ttry:\n\t\t\t\tRUAO_Data = NetCDFFile(RUAOFile, 'r')\n\t\t\texcept IOError:\n\t\t\t\treturn (None, None) if unpack is True else None\n\t\t\t\t\t\n\t\t\tRUAO_Time_Daily = RUAO_Data.variables['time'][:]\n\t\t\tRUAO_PG_Daily = RUAO_Data.variables['pg'][:]\n\t\t\t\n\t\t\tEpoch_Date = datetime.strptime(RUAO_Data.variables['time'].units, \"hours since %Y-%m-%d 00:00:00.0\")\t\t\t\n\t\t\tRUAO_Time_Daily = _hours2dt(RUAO_Time_Daily, Epoch_Date, dtype='datetime64')\n\t\t\t\n\t\t\tif unpack == True:\n\t\t\t\treturn RUAO_Time_Daily, RUAO_PG_Daily\n\t\t\telse:\n\t\t\t\treturn np.array([RUAO_Time_Daily, RUAO_PG_Daily]).T\n\t\t\t\t\t\nclass _EPCC_QC(object):\n\n\tdef FieldMill_QC(self, Field_Mill_Date, Field_Mill_Time, Field_Mill_PG):\n\t\t\"\"\"Performs quality control checks on field mill data logged by a CR1000\n\t\t\n\t\tSame as before but better!\n\t\t\n\t\tN.B. In Version 2 we removed the first three quality control steps\n\t\twhich are now redundant\"\"\"\n\t\t\n\t\t#[Step 1] Remove Known Bad Data\n\t\t\n\t\tLower_Bound = np.datetime64('2017-01-11 11:05:20')\n\t\tUpper_Bound = np.datetime64('2017-01-11 11:07:13')\n\t\t\n\t\tTime_Boundary = np.searchsorted(Field_Mill_Date, [Lower_Bound, Upper_Bound])\n\t\t\n\t\t#Convert pandas to numpy\n\t\tField_Mill_Time = np.asarray(Field_Mill_Time)\n\t\tField_Mill_PG = np.asarray(Field_Mill_PG, dtype=np.float64)\n\t\t\n\t\tField_Mill_Time[Time_Boundary[0]:Time_Boundary[1]] = np.nan\n\t\tField_Mill_PG[Time_Boundary[0]:Time_Boundary[1]] = np.nan\n\t\t\n\t\t#[Step 2] Calibrate\n\t\tTime_Mask = (Field_Mill_Date >= np.datetime64('2017-01-11 11:04:46'))\n\t\t\n\t\tField_Mill_PG[Time_Mask] = 17.0438+2.08768*Field_Mill_PG[Time_Mask]\n\t\tField_Mill_PG[~Time_Mask] = 17.0438+0.208768*Field_Mill_PG[~Time_Mask]\n\t\t\n\t\treturn Field_Mill_Time, Field_Mill_PG\n\t\n\tdef RUAO_QC(self, ruaofile, col):\n\n\t\tif len(col) > 1:\n\t\t\tif col[0] == 0:\n\t\t\t\truaofile = np.array(ruaofile, dtype=object)\n\t\t\t\truaofile[:,0] = _Excel_to_Python_Date(ruaofile[:,0], strip=False)\n\t\telse:\n\t\t\tif col[0] == 0:\n\t\t\t\truaofile = _Excel_to_Python_Date(ruaofile, strip=False)\n\n\t\treturn ruaofile\n\n############################################################################\n\"\"\"Functions\"\"\"\n\ndef _backend_changer(backend='Qt4Agg'):\n\t\"\"\"Changes the Matplotlib back-end as certain configurations of code can stop any plots\n\tbeing created. E.g. using 'screen' in an interactive terminal requires the 'Agg' back-end.\n\tRunning PG_Quickplotter directly from the command line can use the default back-end of \n\t'Qt4Agg'\n\t\n\tParameters\n\t----------\n\tbackend : str, optional, default = 'Qt4Agg'\n\t\tThe matplotlib backend name you want to change to. The options available can be found\n\t\tby running _backend_checker.\n\t\"\"\"\n\t\n\tif isinstance(backend, str):\n\t\twith warnings.catch_warnings():\n\t\t\twarnings.simplefilter(\"ignore\")\n\t\t\t\n\t\t\tplt.switch_backend(backend)\n\telse:\n\t\traise ValueError(\"[_backend_changer] Backend needs to be a string. We got %s. 
Use _backend_checker to see the available Matplotlib backends for your system.\" % backend)\n\t\n\tprint(\"[INFO] Matplotlib backend has been changed to %s\" % backend)\n\t\ndef _backend_checker(show_supported=False, show_valid=False):\n\t\"\"\"Checks the available back-ends in Matplotlib and then tests each supported back-end\n\tto see if they can actually be used in the environment's configuration\"\"\"\n\t\n\tdef _is_backend_module(fname):\n\t\t\"\"\"Identifies if a filename is a Matplotlib backend module\"\"\"\n\t\treturn fname.startswith('backend_') and fname.endswith('.py')\n\n\tdef _backend_fname_formatter(fname): \n\t\t\"\"\"Removes the extension of the given filename, then takes away the leading 'backend_'.\"\"\"\n\t\treturn os.path.splitext(fname)[0][8:]\n\n\t# get the directory where the back-ends live\n\tbackends_dir = os.path.dirname(matplotlib.backends.__file__)\n\n\t# filter all files in that directory to identify all files which provide a backend\n\tbackend_fnames = filter(_is_backend_module, os.listdir(backends_dir))\n\n\tbackends = [_backend_fname_formatter(fname) for fname in backend_fnames]\n\n\tif show_supported is True: print(\"Supported Backends: \\t %s \" % backends)\n\n\t#Validate Back-ends\n\tbackends_valid = []\n\tfor b in backends:\n\t\ttry:\n\t\t\tplt.switch_backend(b)\n\t\t\tbackends_valid += [b]\n\t\texcept:\n\t\t\tcontinue\n\n\tif show_valid is True: print(\"Valid Backends: \\t %s\" % backends_valid)\n\t\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter(\"ignore\")\n\n\t\t#Try Back-ends Performance\n\t\tbackends_available = []\n\t\tbackends_fps = []\n\t\tfor b in backends_valid:\n\t\t\ttry:\n\t\t\t\t#plt.ion()\t\t\t\t\t\t\t\t#Turn on interactive mode\n\t\t\t\t\n\t\t\t\tplt.switch_backend(b)\t\t\t\t\t#Switch back-end to test\n\n\t\t\t\tplt.clf()\n\t\t\t\ttstart = systime.time() \t\t#For time keeping\n\t\t\t\tx = np.arange(0,2*np.pi,0.01) #X Array\n\t\t\t\tline, = plt.plot(x,np.sin(x))\n\t\t\t\tfor i in xrange(1,50):\n\t\t\t\t\tline.set_ydata(np.sin(x+i/10.0))\t#Update the data\n\t\t\t\t\tplt.draw() \t#Redraw the canvas\n\n\t\t\t\t#plt.ioff()\n\t\t\t\t\n\t\t\t\t#If nothing went wrong during the FPS check then this is a viable back-end to use\n\t\t\t\tbackends_available.append(b)\n\t\t\t\tbackends_fps.append(50/(systime.time()-tstart))\n\t\t\texcept:\n\t\t\t\tpass\n\t\n\t#Sort lists\n\tbackends_fps, backends_available = (list(t) for t in zip(*sorted(zip(backends_fps, backends_available), reverse=True)))\n\t\n\tprint(\"Available Backends to use with Matplotlib\\n-----------------------------------------\\n\")\n\tprint(\"Backend FPS\\n---------------------------\")\n\tfor backend, fps in zip(backends_available, backends_fps):\n\t\tprint(\"%s \\t%.4f\" % (backend, fps))\n\t\n\t#Force exit\n\tsys.exit()\n\t\ndef _cprint(message, type):\n\t\"\"\"Prints colourful messages under a standard style set\"\"\"\n\t\n\t#Console Colours\n\tbcolours = {\n\t\t\"header\" : '\\033[95m',\n\t\t\"okblue\" : '\\033[94m',\n\t\t\"okgreen\" : '\\033[92m',\n\t\t\"warning\" : '\\033[31m',\n\t\t\"fail\" : '\\033[91m',\n\t\t\"endc\" : '\\033[0m',\n\t\t\"bold\" : '\\033[1m',\n\t\t\"underline\" : '\\033[4m'}\n\t\t\n\tprint(bcolours[type] + message + bcolours['endc'])\n\t\ndef _DatetimeFormat(date, format, check=False):\n\t\"\"\"Formats and checks the validity of a datetime\n\tformat as specified by the user.\n\t\n\tParameters\n\t----------\n\tdate : str or list\n\t\tthe datetime string you want to check for being \n\t\tformatted in the correct format.\n\tformat : str\n\t\tthe format 
for the datetime to be formatted in.\n\t\tE.g. %d/%m/%Y_%H:%M:%S\n\t\tCheck http://strftime.org/ if you're unsure.\n\tcheck : bool, optional\n\t\twhether to only check the format and return\n\t\tboolean indicators.\"\"\"\n\t\n\tif isinstance(date, str):\n\t\tif check is True:\n\t\t\ttry:\n\t\t\t\tdatetime.strptime(date, format)\n\t\t\t\treturn True\n\t\t\texcept ValueError:\t\n\t\t\t\treturn False\n\t\telse:\t\n\t\t\ttry:\n\t\t\t\treturn datetime.strptime(date, format)\n\t\t\texcept ValueError:\t\n\t\t\t\treturn np.nan\n\telse:\n\t\tPython_Datetime = np.zeros(len(date), dtype=object)\n\t\tfor i in xrange(len(date)):\n\t\t\ttry:\n\t\t\t\tPython_Datetime[i] = datetime.strptime(date[i], format)\n\t\t\texcept TypeError:\n\t\t\t\tcontinue\n\t\t\t\t\n\t\treturn Python_Datetime\n\ndef _Excel_to_Python_Date(excel_time, expand=None, format=None, strip=True):\n\t\"\"\"Converts Excel formatted time into a Python format\n\t\n\tParameters\n ----------\n excel_time : numpy array\n 1 dimensional time series array, attached to the data stream, used to determine the time\n\t\tat which a step was detected\n\texpand : boolean, optional\n\t\tOutputs an expanded array of time variants over the broad datetime format. This is\n\t\tuseful when requiring the year day or the fraction hours of the day\n\tformat : str, optional\n\t\tif the excel string is in a different format then you can specify this here\n\tstrip : boolean, optional\n\t\tstrips the time component from datetime objects while leaving them intact\n\t\n Returns\n -------\n Python_Datetime : numpy array, float\n The date and time of each timestamp in datetime python format\n\tPython_Date : numpy array, float\n The date of each timestamp in datetime python format\n\tPython_Year : numpy array, int, optional\n\t\tThe year at each time step\n\tPython_YD : numpy array, int, optional\n\t\tThe day of the year at each timestep\n\tPython_Hour : numpy array, float, optional\n\t\tThe fraction hour of each timestep assuming time has been given.\n\t\"\"\"\n\n\tPython_Datetime = np.zeros(len(excel_time), dtype=object)\n\tif format is not None: #Bespoke method\n\t\tfor i in xrange(len(excel_time)):\n\t\t\ttry:\n\t\t\t\tPython_Datetime[i] = datetime.strptime(excel_time[i], format)\n\t\t\texcept TypeError:\n\t\t\t\tcontinue\n\telse: #Standard Method\n\t\tPython_Datetime = np.array([datetime.strptime(date, '%Y-%m-%d %H:%M:%S') for date in excel_time], dtype=object)\n\n\tPython_Datetime\t= Python_Datetime[Python_Datetime != 0]\n\t\n\tif expand is not None:\n\t\t#Convert Python_Datetime to decimal hour format\n\t\tPython_Date = np.zeros_like(Python_Datetime)\n\t\tPython_Year = np.zeros_like(Python_Datetime)\n\t\tPython_YD = np.zeros_like(Python_Datetime)\n\t\tPython_H = np.zeros_like(Python_Datetime)\n\t\tfor i in xrange(len(Python_Datetime)):\n\t\t\tPython_Date[i] = Python_Datetime[i].date()\n\t\t\tPython_Year[i] = Python_Datetime[i].year\n\t\t\tPython_YD[i] \t= Python_Datetime[i].timetuple().tm_yday\n\t\t\tPython_H[i] = toHourFraction(Python_Datetime[i])\n\t\n\tif strip == True:\n\t\tPython_Datetime = toDateOnly(Python_Datetime)\n\t\n\treturn Python_Datetime if expand is None else (Python_Datetime, Python_Date, Python_Year, Python_YD, Python_H)\t\t\n\t\ndef _hours2dt(hours_since_epoch, epoch, dtype='datetime64'):\n\t\"\"\"Converts an array of values representing the number of hours since a common point\n\tknown as an epoch.\n\t\n\tParameters\n\t----------\n\thours_since_epoch : ndarray\n\t\tAn array of the hours since the epoch date\n\tepoch : str, datetime, datetime64\n\t\tThe specific datetime you 
want to start from. The format for epoch needs to\n\t\twork with numpy datetime64.\n\tdtype : str, optional\n\t\tSpecify if you want the output to be in numpy datetime or native \"python\"\n\t\tdatetime format.\n\t\t\n\tExample\n\t-------\n\t>>> hours_since_epoch = np.linspace(0,24,86400)\n\t>>> epoch = datetime(1992,8,17)\n\t\n\t>>> _hours2dt(hours_since_epoch, epoch)\n\tarray([datetime(1992,8,17,0,0,0), datetime(1992,8,17,0,0,1) ...])\n\t\n\tNotes\n\t-----\n\tFunction is fast if you specify dtype='datetime64' and leave it in numpy format.\n\t\t\n\tReference\n\t---------\n\thttps://codereview.stackexchange.com/a/77662/151534\n\thttps://stackoverflow.com/a/13704307/8765762\n\t\"\"\"\n\t\n\tseconds = np.around(hours_since_epoch * (60*60))\n\tif dtype == 'datetime64':\n\t\treturn np.datetime64(epoch) + seconds.astype('timedelta64[s]')\n\telif dtype == 'datetime':\n\t\treturn np.array((np.datetime64(epoch) + seconds.astype('timedelta64[s]')).astype(datetime), dtype=object)\n\ndef _dt2hours(array_of_datetimes, epoch):\n\t\"\"\"Converts an array of datetime objects to hours since epoch. Reverse of _hours2dt.\n\t\n\tepoch can be either a single datetime or an ndarray of datetime matching the dimensions\n\tof array_of_datetimes exactly.\"\"\"\n\t\n\tif isinstance(array_of_datetimes, np.ndarray):\n\n\t\t#check for float and int dtypes\n\t\tdtypes = ['float', 'float32', 'float128', 'int']\n\t\tif np.any(np.in1d(dtypes, array_of_datetimes.dtype)):\n\t\t\treturn array_of_datetimes\n\t\telse:\n\t\t\tif not isinstance(epoch, np.ndarray): epoch = np.datetime64(epoch) \n\t\t\tif array_of_datetimes.dtype == 'O': #Object array\n\t\t\t\treturn (toDatetime64(array_of_datetimes) - epoch.astype('datetime64[s]')).astype('timedelta64[s]').astype(float)/3600\n\t\t\telse:\n\t\t\t\treturn (array_of_datetimes.astype('datetime64[s]') - epoch.astype('datetime64[s]')).astype('timedelta64[s]').astype(float)/3600\n\telse:\n\t\tsys.exit(\"[Error] _dt2hours requires an ndarray as input, not a single value\")\n\t\t\ndef _Buffer_Tips_TopDown_(data, time=None, thres='dynamic', abs=False, search=(1,1,0), invert=False, output=True):\n\t\"\"\"This method provides the best estimate of tip times when the noise of the instrument is \n\tsignificant enough to suppress their recognition. When the noise of the device is extremely \n\tminimal relatively then 'Buffer_Tips_Threshold' will probably provide better results. \n\tCurrently this method produces ~95% of tips for a moderate amount of noise where the remaining \n\t5% is obscured by false positives which have/have not been removed from end processing.\n\t\n\tParameters\n ----------\n data : numpy array\n One dimensional array of data to be tested by the step detection.\n time : numpy array, optional\n One dimensional array for secondary data where the results of the step detection are \n\t\tbroadcast on time. If time is set to None the indices are returned. Default is time=None.\n\tthres : float or str, optional\n\t\tSpecifies the level of noise you want to remove which has the same units as the input \n\t\tarray, data. 
The parameter can take float values as well as two string values:\n\t\t\n\t\t\t'dynamic' : determines the local noise levels within the data at each iteration \n\t\t\t\t\t\tand thus is a dynamic method for determining noise for a non-static\n\t\t\t\t\t\tdataset\n\t\t\t'static' : determines the global noise levels of the data and specifies this value\n\t\t\t\t\t throughout the search process\n\t\t\t\t\t \n\tabs : boolean, optional\n\t\tUsed to specify that both positive and negative values can be used for the step search.\n\t\tThis can be useful (i.e. abs = True) when the input data is not bounded in their \n\t\tvalues (i.e. data can be both positive and negative at the same time)\n\tsearch : tuple, optional\n\t\tUsed to specify how strict your detection should be. Specifically this determines\n\t\tif the process is forward in space, backwards in space or centred in space. The input\n\t\tshould be in the form of a tuple (e.g. (0,0,0)) where each number represents the range\n\t\tof search:\n\t\t\n\t\t\t\t\tsearch = (forward in space, centre in space, backward in space)\n\t\t\t\t\t\n\t\tThe codes are, 0 : Don't Select, and 1 : Select.\n\t\t\n\t\te.g. search = (1,1,0) means that we are searching forward and centred in space.\n\tinvert : boolean, optional\n\t\tUsed to specify if we should look for values within the noise parameter set by the \n\t\tthreshold parameter. The default is False.\n\toutput : boolean, optional\n\t\tOutputs information to the user about the step detection at each iteration.\n\n Returns\n -------\n Step_Time : numpy array\n The index or times of each detected step dependent on whether the 'time' \n\t\tinput is given\n\tStep_Mag : numpy array\n\t\tThe magnitude of the step that was detected at the time index given by Step_Time. The units are \n\t\troughly related to the time derivative of the input array.\n\t\n\tWarnings\n\t--------\n\tsearch_warn : string\n\t\tIf search_forward and search_backward have been set to False, then it will raise a warning\n\t\tthat at least one of these parameters needs to be set to True. This by default will put\n\t\tsearch_forward = True.\n\t\n\tExample\n\t-------\n\tIf we have a dataset that consists of 86400 elements, this algorithm will first determine\n\tthe structure of the data by which it can divide itself and produce an integer, thus determining\n\thow many bins we will need at each stage. So for our dataset we will get an output of,\n\t\n\t\t\t\t\t\t\t\t\tmul = [5,3,3,3,2,2,2,2,2,2,2],\n\t\t\t\t\t\t\t\t\n\twhere the product of mul * 5 will yield 86400 again. Therefore, for this specific size of array \n\twe start off with splitting the data into 5 equally (temporally) sized bins. Then we determine\n\tthe mean difference between each successive bin. Then for each bin we determine if this is larger\n\tthan our specified threshold, thres. The calculation of this variable depends on the user \n\tspecification. Therefore, for static we determine the signal to noise ratio (mean/std) for the\n\twhole dataset, and for dynamic we determine the signal to noise ratio for each bin. If a bin\n\twas larger than thres then we know that there were some significant changes happening in the \n\tdata and we then search at a higher resolution there again, otherwise we don't bother. This adds\n\ta method of segregating regions of interest from those that are not. Finally, once this has been\n\tcompleted we expand the number of bins as specified by the next number in mul (i.e. 5) and we\n\tthen have 25 bins. 
So for our first iteration, if our search determined that the bins,\n\t\n\t\t\t\t\t\t\t\t\t\tbins = [0,0,1,0,1],\n\t\t\t\t\t\t\t\t\n\twere above the threshold then we will search the appropriate arrays in the next iteration, i.e.\n\t\n\t\t\t\t\tbins = [0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,0,0,0,0,0,1,1,1,1,1]\n\t\t\t\t\t\n\tThe search area will change depending on which regime you set with the parameter, search. This\n\tis an example of centred in space, where the progression is centred on its parent iteration and\n\tdoesn't extend over to the adjacent bin members, as would be the case with the forward and\n\tbackward in space regimes.
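\n\t\n\tA minimal doctest-style sketch (editor's illustration; the data and threshold are hypothetical):\n\t\n\t>>> data = np.concatenate([np.zeros(43200), np.full(43200, 50.0)])\n\t>>> Step_Time, Step_Mag = _Buffer_Tips_TopDown_(data, thres=5.0, output=False)\n\t\n\tWith no time array given, Step_Time holds the flagged indices around the step at element\n\t43200 and Step_Mag holds the corresponding between-bin mean differences (roughly 50 at\n\tthe step).\n\t\"\"\"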
\n\n\t############################################################################\t\n\t\"\"\"Buffer Tip Times\"\"\"\n\tfor k in xrange(start, len(mul)+1):\n\t\t#Initialise Iteration\n\t\tid = np.arange(bins_counter)\n\t\ttip_finder = np.zeros(bins_counter)\n\t\tbins = np.zeros(bins_counter)\n\t\tbins_noise = np.zeros(bins_counter)\n\t\t\n\t\t#Calculate mean difference for each bin\n\t\tfor i in xrange(bins_counter-1):\n\t\t\tif bins_guide[i] == 1:\n\n\t\t\t\tbins_upper \t= data[int(((i+1)/bins_counter)*len(data)):int(((i+2)/bins_counter)*len(data))]\n\t\t\t\tbins_lower \t= data[int((i/bins_counter)*len(data)):int(((i+1)/bins_counter)*len(data))]\n\t\t\t\tbins_both \t= data[int((i/bins_counter)*len(data)):int(((i+2)/bins_counter)*len(data))]\n\n\t\t\t\twith warnings.catch_warnings():\n\t\t\t\t\twarnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\t\t\t\t\t\n\t\t\t\t\tbins[i] \t\t= np.nanmean(bins_upper) - np.nanmean(bins_lower)\n\t\t\t\t\tbins_noise[i] \t= np.nanmedian(bins_both)/np.nanstd(bins_both, ddof=1)\n\n\t\tif bins_guide[-1] == 1:\n\t\t\tbins_upper \t\t= data[int(((-1)/bins_counter)*len(data)):]\n\t\t\tbins_lower\t\t= data[int((-2/bins_counter)*len(data)):int(((-1)/bins_counter)*len(data))]\n\t\t\tbins_both \t\t= data[int((-2/bins_counter)*len(data)):]\n\t\t\t\n\t\t\twith warnings.catch_warnings():\n\t\t\t\twarnings.simplefilter(\"ignore\", category=RuntimeWarning)\n\t\t\t\t\n\t\t\t\tbins[-1] \t\t= np.nanmean(bins_upper) - np.nanmean(bins_lower)\n\t\t\t\tbins_noise[-1] \t= np.nanmedian(bins_both)/np.nanstd(bins_both, ddof=1)\n\t\t\t\t\n\t\t#When we specify a noise level we need to populate bins_noise with these values\n\t\tif thres == 'static': thres = np.nanmedian([np.std(data[i*1000:(i+1)*1000], ddof=1) for i in xrange(int(len(data)/1000))])\n\t\tif thres != 'dynamic': bins_noise = np.full(len(bins), thres, dtype=float)\n\t\tbins_test = np.abs(bins) if abs is True else bins\n\n\t\t#Determine whether each bin is higher than the noise level\n\t\tsearcher = 0\n\t\tfor i in xrange(len(bins)):\n\t\t\tif invert is False:\n\t\t\t\tif bins_test[i] > bins_noise[i]: #0.0103: #This is used to remove the doubles in the data, a kind of lag-busting coefficient\n\t\t\t\t\ttip_finder[i] = 1 #Let's search here again\n\t\t\t\t\tsearcher+=1\n\t\t\telif invert is True:\n\t\t\t\tif bins_test[i] < bins_noise[i]: #0.0103: #This is used to remove the doubles in the data, a kind of lag-busting coefficient\n\t\t\t\t\ttip_finder[i] = 1 #Let's search here again\n\t\t\t\t\tsearcher+=1\n\t\t\telse:\n\t\t\t\tprint(\"[Error] Buffer_Tips_TopDown_v2 | invert parameter must be a boolean object. We got invert = %s\" % invert)\n\t\t\t\tsys.exit()\n\t\t\t\t\n\t\tif output is True: print(\"Number of searchers:\", searcher, \"Bin Noise\", np.nansum(bins_noise))\n\t\tif k == len(mul): continue #i.e. exit\n\n\t\t#Update for next level of search\n\t\tbins_counter *= mul[k]\n\t\tbins_guide \t = np.zeros(bins_counter)\n\n\t\t#Determines locations where to search next (i.e. 
if a previous level search didn't find any significant differences then why bother to continue to search)\t\t\t\t\n\t\tfor i in xrange(len(tip_finder)-1):\n\t\t\tif tip_finder[i] == 1:\n\t\t\t\tif search[2] == 1:\n\t\t\t\t\tfor j in xrange(mul[k]):\n\t\t\t\t\t\tbins_guide[mul[k]*i-(mul[k]+j-1)] = 1\n\t\t\t\tif search[1] == 1:\n\t\t\t\t\tfor j in xrange(mul[k]):\n\t\t\t\t\t\tbins_guide[mul[k]*i+j] = 1\n\t\t\t\tif search[0] == 1:\t\t\n\t\t\t\t\tfor j in xrange(mul[k]):\n\t\t\t\t\t\tbins_guide[mul[k]*i+(j+mul[k])] = 1\n\n\t\tif tip_finder[-1] == 1:\n\t\t\tif search[2] == 1:\n\t\t\t\tfor j in xrange(mul[k]):\n\t\t\t\t\tbins_guide[mul[k]*(len(tip_finder)-1)-(mul[k]+j-1)] = 1\n\t\t\tif search[0] == 1:\t\t\n\t\t\t\tfor j in xrange(mul[k]):\n\t\t\t\t\tbins_guide[mul[k]*(len(tip_finder)-1)+j] = 1\n\n\t############################################################################\n\t\"\"\"Once we have levelled down to 1s resolution then we finish searching\"\"\"\n\tif output is True: print(bins_counter, len(tip_finder), len(bins))\n\t\n\t#If the length of the data array was a prime number we need to remove the last number to make len(time) == len(tip_finder)\n\tbins = bins[:int(len(data)-len(tip_finder))] if len(data) != len(tip_finder) else bins\n\ttip_finder = tip_finder[:int(len(data)-len(tip_finder))] if len(data) != len(tip_finder) else tip_finder\n\t\n\t#Get the Time and Magnitude for each detected tip. If time was not specified then the index will be returned.\n\tStep_Time = time[np.roll(tip_finder, 1) == 1] if time is not None else np.arange(len(data))[np.roll(tip_finder, 1) == 1]\n\tStep_Mag = bins[tip_finder == 1] if time is not None else bins[tip_finder == 1]\n\t\n\tif output is True: print('No. Tips:', len(tip_finder[tip_finder==1]))\n\t\n\tStep_Time = Step_Time[np.argsort(Step_Time)] #Sorts in ascending order the time of each step\n\tStep_Mag = Step_Mag[np.argsort(Step_Time)]\n\n\treturn Step_Time, Step_Mag\n\ndef _get_multipler(array, m=2, mode='1D'):\n\t\"\"\"Determines mutipler at each step which produces a whole\n\tnumber.\n\t\n\tParameters\n\t----------\n\tarray : int\n\t\tThe number you want to divide by\n\tm : int, optional\n\t\tInitial division number. By default this is set to 2\n\t\tas this is the lowest integer divisor\n\t\t\n\tReturns\n\t-------\n\tmul : numpy array\n\t\tThe array sequence of multiplers at each step. This will be\n\t\tin reversed order so if we start at the lowest number (i.e.\n\t\tfirst number in mul) then we can mutply by the first number \n\t\tin mul and so on\n\t\n\t\"\"\"\n\t\n\tdef _is_prime(a):\n\t\treturn all(a % i for i in xrange(2, a))\n\n\t\n\t#if number is prime then remove 1 from sequence\n\tif _is_prime(array) == True: \n\t\tarray += 1\n\t\t#print(\"ITS THE PRIME OF YOUR LIFE!\")\n\t\n\t#if mode == '1D':\n\tmul = np.array([], dtype=int)\n\twhile array > m:\n\t\tarray_temp = array/m\n\t\tif array_temp == int(array_temp):\n\t\t\tarray//=m\n\t\t\tmul = np.append(mul, m)\n\t\telse:\n\t\t\twhile array_temp != int(array_temp):\n\t\t\t\tif m < 10:\n\t\t\t\t\tm += 1\n\t\t\t\t\tarray_temp = array/m\n\t\t\t\telse:\n\t\t\t\t\tarray += 1\n\t\t\t\t\tm = 2\n\t\t\t\t\tarray_temp = array/m\n\t\t\tarray//=m\n\t\t\tmul = np.append(mul, m)\n\t\n\n\tmul = np.array(list(reversed(mul)), dtype=int)\n\t\n\treturn mul\t\n\t\ndef _LightningDetector(Field_Mill_PG):\n\t\"\"\"This code is based on the lightning remover function as part of the PGRR analysis\n\tmethods. 
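Stepping back to the refinement loop above: the bins_guide update is easiest to see in isolation. Below is a minimal sketch of the 'centred in space' case only (a hypothetical expand_guide helper, not the in-line triple-branch version), reproducing the docstring's bins = [0,0,1,0,1] example:

	import numpy as np

	def expand_guide(tip_finder, mul_k):
		# Each flagged parent bin marks its mul_k children for the next,
		# finer search pass.
		guide = np.zeros(len(tip_finder) * mul_k, dtype=int)
		for i, flagged in enumerate(tip_finder):
			if flagged:
				guide[i*mul_k:(i+1)*mul_k] = 1
		return guide

	expand_guide(np.array([0, 0, 1, 0, 1]), 5)
	# -> [0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 1 1 1 1 1]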
Overall this will remove lightning discharges from a PG data set using the \n\tfollowing criteria:\n\t\n\t(1) PG Gradient >+-500V/ms (Transient Finder)\n\t(2) PG >+-800V/m (Distance Parameter)\n\t\n\tThe PG removed is between -5:+55 seconds around a detected lightning discharge\n\t\"\"\"\n\n\t#Initial Parameters\n\tlight_grad = 500\n\tlight_mag = 0\n\tskip = 0\n\tLightning_Detected = 0\n\t\n\tfor j in range(len(Field_Mill_PG)-1):\n\t\t#Calculate dPG/dt\n\t\tdPG\t\t\t= np.abs(Field_Mill_PG[j+1] - Field_Mill_PG[j])\t\t\n\t\tif (dPG > light_grad) and (np.abs(Field_Mill_PG[j]) > light_mag) and (skip == 0):\n\t\t\tLightning_Detected += 1\n\t\telif skip > 0:\n\t\t\tskip -= 1\n\n\treturn \n\ndef _stats(array, extend=True, axis=None):\n\t\"\"\"Outputs the standard statistics from the input array\n\t\n\tParameters\n\t----------\n\tarray : ndarray\n\t\tThe values you want to produce a statistic for\n\textend : boolean, optional\n\t\tSpecify outputting extra statistics. The extra ones are range and noise \n\t\t(e.g. noise = median/std)\n\taxis : int, optional\n\t\tThe axis you want to provide the stats. Default is None which averages as\n\t\ta single unit.\n\t\t\n\tOutputs\n\t-------\n\tstats : ndarray\n\t\tstats array containing the min, max, mean, median and std\n\t\"\"\"\n\t\n\t\n\tif extend is not False:\n\t\tif array.size == 0:\n\t\t\treturn np.array([np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan])\n\t\telse:\n\t\t\t#return np.array([np.nanmin(array, axis=axis), np.nanmax(array, axis=axis), np.nanmean(array, axis=axis), np.nanmedian(array, axis=axis), np.nanstd(array, axis=axis), np.nansum(array, axis=axis), np.nanmax(array, axis=axis)-np.nanmin(array, axis=axis), np.nanmedian(array, axis=axis)/np.nanstd(array, axis=axis), _iqr(array, 75, axis=axis), _iqr(array, 90, axis=axis)], dtype=float)\n\t\t\t\t\n\t\t\treturn {'min' : np.nanmin(array, axis=axis),\n\t\t\t\t'max' : np.nanmax(array, axis=axis),\n\t\t\t\t'mean' : np.nanmean(array, axis=axis), \n\t\t\t\t'median' : np.nanmedian(array, axis=axis), \n\t\t\t\t'sd' : np.nanstd(array, axis=axis), \n\t\t\t\t'sum' : np.nansum(array, axis=axis), \n\t\t\t\t'range' : np.nanmax(array, axis=axis)-np.nanmin(array, axis=axis), \n\t\t\t\t'noise' : np.nanmedian(array, axis=axis)/np.nanstd(array, axis=axis), \n\t\t\t\t'iqr_75' : _iqr(array, 75, axis=axis), \n\t\t\t\t'iqr_90' : _iqr(array, 90, axis=axis)}\n\telse:\t\t\n\t\tif array.size == 0:\n\t\t\treturn np.array([np.nan, np.nan, np.nan, np.nan, np.nan])\n\t\telse:\n\t\t\treturn np.array([np.nanmin(array, axis=axis), np.nanmax(array, axis=axis), np.nanmean(array, axis=axis), np.nanmedian(array, axis=axis), np.nanstd(array, axis=axis)], dtype=float)\n\ndef _iqr(array, percentile=75, axis=None):\n\t\"\"\"Calculates the interquartile range of an array\"\"\"\n\t\t\t\n\ttry:\n\t\treturn np.subtract(*np.nanpercentile(array, [percentile,100-percentile], axis=axis))\n\texcept TypeError:\n\t\treturn np.nan\t\t\t\n\t\t\t\ndef _Cloud_Identifer(Time, Sg, Sd):\n\t\"\"\"Classifies the type of cloud that is overhead using solar radiation measurements,\n\t\n\tParameters\n\t----------\n\tTime : 1D array of list\n\t\tThe time for each measurement of Sg and Sd. The time needs to be in fractional hour\n\tSg : 1D array or list\n\t\tThe global solar (shortwave) irradiance\n\tSd : 1D array or list\n\t\tThe global diffuse (longwave) irradiance\n\t\t\n\tOutput\n\t------\n\tCloud_Time : array or float\n\t\tA float of the central time for each 15 minute period. 
If there is more data than\n\t\t15 minutes then the output is a float array.\n\tCloud_Type : array or int\n\t\tAn integer specifying the cloud type per 15 minutes of data. If there is more data\n\t\tthan 15 minutes then the output is an integer array.\n\t\t\n\tReference\n\t---------\n\t\n\t\n\t\"\"\"\n\t\n\t#Ensure inputs are numpy arrays\n\tif isinstance(Time[0], np.datetime64):\n\t\tDate = Time[0]\n\t\tTime = _dt2hours(Time, Date)\n\t\t\n\t\t#print(Date)\n\t\t#print(Time)\n\t\t#print(Time.size)\n\t\t\n\t\tDatetime_Found = 'datetime64'\n\telif isinstance(Time[0], datetime):\n\t\tDate = Time.astype('datetime64[s]').astype('datetime64[D]').astype('datetime64[s]')[-10]\n\t\tTime = np.array([_toHourFraction(x) for x in Time], dtype=float)\n\t\tDatetime_Found = 'datetime'\n\t\n\telse:\n\t\tTime = np.array(Time, dtype=float)\n\t\tDatetime_Found = 'none'\n\tSg = np.array(Sg, dtype=float)\n\tSd = np.array(Sd, dtype=float)\n\t\n\t#Sort data\n\tsort_index = np.argsort(Time, kind='mergesort')\n\tTime = Time[sort_index]\n\tSg = Sg[sort_index]\n\tSd = Sd[sort_index]\n\t\n\t#Determine if Time is shorter than 15 minutes\n\tif Time[-1] - Time[0] < 0.25: \n\t\twarnings.warn('[Cloud_Identifer]: The identification of cloud type requires 15 minutes or more data. Only %.2f minutes was found!' % ((Time[-1] - Time[0])*60), RuntimeWarning, stacklevel=2)\n\t\treturn None, None\n\t\n\t#Determine if Time is exactly 15 minutes\n\telif Time[-1] - Time[0] == 0.25:\n\t\t\n\t\tDiffuse_Fraction = Sd/Sg\n\n\t\tif np.nanmedian(Diffuse_Fraction) >= 0.9: \t#Overcast\n\t\t\tCloud_Type = 2\n\t\telif np.nanmedian(Diffuse_Fraction) <= 0.3: \t#Clear\n\t\t\tCloud_Type = 1\n\t\telif np.nanstd(Diffuse_Fraction) <= 0.05: \t#Cumuliform\n\t\t\tCloud_Type = 4\n\t\telif np.nanstd(Diffuse_Fraction) >= 0.1: \t#Stratiform\n\t\t\tCloud_Type = 3\n\t\telse:\t\t\t\t\t\t\t\t\t#Unclassified\n\t\t\tCloud_Type = 5\n\t\t\n\t\tCloud_Time = np.nanmean(Time)\n\t\n\t#Determine if Time is longer than 15 minutes\n\telif Time[-1] - Time[0] > 0.25:\n\t\t\n\t\t#Calculate Cloud_Time\n\t\tCloud_Num = int((Time[-1] - Time[0])/0.25)+1\n\t\tCloud_Time = np.array([Time[0] + 0.125 + Time_Step for Time_Step in np.linspace(0, Cloud_Num*0.25, Cloud_Num, endpoint=False)], dtype=float)\n\t\t\n\t\t#Calculate Cloud_Type for each 15 minute subset\n\t\tCloud_Type = np.zeros(Cloud_Num)\n\t\tfor i, cloud_time in enumerate(Cloud_Time):\n\t\t\tMask = ((cloud_time-0.125) <= Time) & ((cloud_time+0.125) > Time)\n\t\t\n\t\t\tDiffuse_Fraction = Sd[Mask]/Sg[Mask]\n\t\t\t\n\t\t\tif np.nanmedian(Diffuse_Fraction) >= 0.9: \t#Overcast\n\t\t\t\tCloud_Type[i] = 2\n\t\t\telif np.nanmedian(Diffuse_Fraction) <= 0.3: #Clear\n\t\t\t\tCloud_Type[i] = 1\n\t\t\telif np.nanstd(Diffuse_Fraction) <= 0.05: \t#Cumuliform\n\t\t\t\tCloud_Type[i] = 4\n\t\t\telif np.nanstd(Diffuse_Fraction) >= 0.1: \t#Stratiform\n\t\t\t\tCloud_Type[i] = 3\n\t\t\telse:\t\t\t\t\t\t\t\t\t#Unclassified\n\t\t\t\tCloud_Type[i] = 5\n\n\t\t#If input time was in datetime, we now convert back to datetime\n\t\tif Datetime_Found == 'datetime': Cloud_Time = np.array([roundTime(Date + timedelta(hours=hour_frac), 1) for hour_frac in Cloud_Time], dtype=object)\n\t\tif Datetime_Found == 'datetime64': Cloud_Time = _hours2dt(Cloud_Time, Date)\n\t\t\t\t\n\treturn Cloud_Time, Cloud_Type\n\n\t\t\nif __name__ == \"__main__\":\n\t\n\t#Initial Conditions\n\tt_begin = systime.time()\n\tLocs_Avaliable = ['RUAO', 'Chilbolton', 'All']\n\n\tparser = argparse.ArgumentParser(description='Process some integers.')\n\t\n\t#Command Line 
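# -- Illustrative aside (hypothetical helper, not part of this script) ------
# The 15-minute rule in _Cloud_Identifer above, isolated: the diffuse
# fraction Sd/Sg is reduced to a median and a standard deviation and mapped
# onto the same integer codes used there.
def _classify_cloud_sketch(sd, sg):
	frac = np.asarray(sd, dtype=float) / np.asarray(sg, dtype=float)
	if np.nanmedian(frac) >= 0.9: return 2		#Overcast
	if np.nanmedian(frac) <= 0.3: return 1		#Clear
	if np.nanstd(frac) <= 0.05: return 4		#Cumuliform
	if np.nanstd(frac) >= 0.1: return 3			#Stratiform
	return 5									#Unclassified
# ----------------------------------------------------------------------------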
Arguments\n\tparser.add_argument('-l', '--location',\n action=\"store\", dest=\"Location\",\n help=\"Specify the location you want to plot from. Current options are: [RUAO, Chilbolton, All]. Default is RUAO. The All argument will plot all available locations onto a single plot with respects to the time range specified.\", \n\t\t\tdefault='RUAO', required=False)\n\t\t\t\n\tparser.add_argument('-s', '--start',\n action=\"store\", dest=\"Date_Start\",\n help=\"The date and time you want to start plotting PG. Format = %%d/%%m/%%Y_%%H:%%M:%%S. Go to http://strftime.org/ for reference on datetime formats\",\n\t\t\trequired=False)\n\n\tparser.add_argument('-e', '--end',\n action=\"store\", dest=\"Date_End\",\n help=\"The date and time you want to end plotting PG. Format = %%d/%%m/%%Y_%%H:%%M:%%S. Go to http://strftime.org/ for reference on datetime formats.\", \n\t\t\trequired=False, default=None)\n\t\n\tparser.add_argument('-k',\n\t\t\taction=\"store\", dest=\"LeadTime\",\n\t\t\thelp=\"Alternative argument to supplying the start and end dates. Use this option to specify the time in minutes you want to plot before the current time. i.e. -k 60 will plot 60 minutes of the latest data\",\n\t\t\trequired=False, type=float, default=None)\n\t\n\tparser.add_argument('-g', '--grade',\n\t\t\taction=\"store\", dest=\"Grade\",\n\t\t\thelp=\"Specify the quality of the PG data. --grade low will use the basic quality controlled PG data but has the advantage of being able to plot as close to live as possible. --grade high is the best quality controlled data available but this data is typically only updated once per day. See --ping for details on when the low grade and high grade data are available. N.B. The speed differences for plotting are minimal although high grade data is faster over longer time ranges.\",\n\t\t\trequired=False, type=str, default='high')\n\t\n\tparser.add_argument('--show',\n action=\"store_true\", dest=\"Show_Plot\",\n help=\"Show the plot to screen. Default is True\", \n\t\t\tdefault='False')\n\t\t\t\n\tparser.add_argument('--save',\n\t\taction=\"store_true\", dest=\"Save_Plot\",\n\t\thelp=\"Save the plot to file. If set to True, the plot will be saved in current directory. Default is False. Format of file name will be 'PG_Quickplot___to_.png'\", \n\t\tdefault='False')\n\t\t\t\n\tparser.add_argument('-f',\n\t\t\taction='store_true', dest=\"fix_plot\",\n help=\"Fixes the axis of RUAO and Chilbolton plot when plotted together.\",\n\t\t\tdefault='True')\n\t\t\t\n\tparser.add_argument('--ping',\n\t\t\taction='store_true',\n help=\"Prints to screen if all the data is available and the latest data that can be plotted.\")\n\t\n\tparser.add_argument('--backend',\n\t\t\ttype=str, default=False, const=True, nargs='?',\n help=\"Without any arguments --backend will check the available Matplotlib back-ends that are available on this system along with their FPS. Specifying the back-end you want to used, e.g. --backend Agg will change the Matplotlib to that backend and not show available backends.\")\n\t\t\t\n\targ = parser.parse_args()\n\t\t\n\tif arg.ping: PG_Ping()\n\tif arg.backend is True: _backend_checker()\n\t\n\t#Conditional Formatting Checks\n\tif not arg.Location in Locs_Avaliable: sys.exit(\"You have not specified a correct location to plot PG data. The available options are %s.\" % Locs_Avaliable)\n\tif arg.LeadTime is None:\n\t\tif arg.Date_Start is None: sys.exit(\"A start date and time must be specified in the format %d/%m/%Y_%H:%M:%S. 
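# -- Illustrative aside ------------------------------------------------------
# _DatetimeFormat(..., check=True) is defined elsewhere in this module; a
# hypothetical minimal equivalent of the check it appears to perform is just
# strptime-or-False:
from datetime import datetime

def _valid_datetime_sketch(s, fmt='%d/%m/%Y_%H:%M:%S'):
	try:
		datetime.strptime(s, fmt)	# parses, e.g., '10/11/2017_15:30:00'
		return True
	except (ValueError, TypeError):
		return False
# ----------------------------------------------------------------------------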
Go to http://strftime.org/ for reference on datetime formats\")\n\t\tif arg.Date_End is None: sys.exit(\"An end date and time must be specified in the format %d/%m/%Y_%H:%M:%S. Go to http://strftime.org/ for reference on datetime formats\")\n\t\tif not _DatetimeFormat(arg.Date_Start, \"%d/%m/%Y_%H:%M:%S\", check=True): sys.exit(\"Start date and time has not been formatted correctly. The format is %d/%m/%Y_%H:%M:%S. For example 10/11/2017_15:30:00.\")\n\t\tif not _DatetimeFormat(arg.Date_End, \"%d/%m/%Y_%H:%M:%S\", check=True): sys.exit(\"End date and time has not been formatted correctly. The format is %d/%m/%Y_%H:%M:%S. For example 10/11/2017_15:30:00.\")\n\t\tif (_DatetimeFormat(arg.Date_Start, \"%d/%m/%Y_%H:%M:%S\") - _DatetimeFormat(arg.Date_End, \"%d/%m/%Y_%H:%M:%S\")).total_seconds() >= 0: sys.exit(\"Start Date >= End Date. Please change you start and end date specifications\")\n\tif arg.Save_Plot is True and arg.Show_Plot is True: sys.exit(\"--show and --save arguments cannot be given simultaneously!\")\n\tif arg.LeadTime is not None and arg.LeadTime <= 0: sys.exit(\"-k argument requires a positive number in minutes\")\n\tif not np.any(np.in1d(arg.Grade, ['low', 'high'])): sys.exit(\"-g --grade argument can only accept 'low' or 'high' as arguments. See --help for details.\")\n\t\t\n\t#Format start and end dates\n\tif arg.LeadTime is None:\n\t\tDate_Start = _DatetimeFormat(arg.Date_Start, \"%d/%m/%Y_%H:%M:%S\")\n\t\tDate_End = _DatetimeFormat(arg.Date_End, \"%d/%m/%Y_%H:%M:%S\")\n\telse:\n\t\tDate_Start = datetime.utcnow() - timedelta(hours=(arg.LeadTime/60.0+.0/60.0))\n\t\tDate_End = datetime.utcnow()# - timedelta(hours=20.0/60.0)\n\tShoworSave = True if arg.Save_Plot is True else False\n\tHigh_Grade = True if arg.Grade == 'high' else False\n\tHigh_Grade = False if arg.LeadTime is not None else High_Grade\n\t\n\t#Change Matplotlib back-end if requested\n\tif isinstance(arg.backend, str): _backend_changer(arg.backend)\t\t\n\t\n\t#Plot PG Data\n\tPG_Plotter(arg.Location, Date_Start, Date_End, ShoworSave, High_Grade=High_Grade, Fixed_Axis=arg.fix_plot)\n\tPG_Report(arg.Location, Date_Start, Date_End, ShoworSave, High_Grade=High_Grade)\n\t\n\tprint(\"PG Quickplotter has completed successfully in %0.2fs\" % (systime.time()-t_begin))","sub_path":"Extras/PG_Quickplotter.py","file_name":"PG_Quickplotter.py","file_ext":"py","file_size_in_byte":80968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"516152229","text":"# code: 22p22c0398-พรระติชัย\n# crated by Phonratichai\n# 19/09/63 14:40\nnameList = ['winn', 'thanarak', 'somchai',\n 'ricky', 'tao', 'wanida', 'peerapol']\n\n\ndef countname_with_alphabet(alphabet):\n count = 0\n for name in nameList:\n if alphabet in name:\n count += 1\n return count\n\n\ndef findname_with_alphabet(alphabet):\n subnameList = []\n for name in nameList:\n if alphabet in name:\n subnameList.append(name)\n return subnameList\n\n\nif __name__ == \"__main__\":\n print(countname_with_alphabet('a'))\n print(countname_with_alphabet('n'))\n print(findname_with_alphabet('a'))\n print(findname_with_alphabet('n'))\n","sub_path":"countname_with_ap.py","file_name":"countname_with_ap.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163642786","text":"import os.path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport util\n\ndef learn_distributions(file_lists_by_category):\n \"\"\"\n Estimate the parameters p_d, 
and q_d from the training set\n \n Input\n -----\n file_lists_by_category: A two-element list. The first element is a list of \n spam files, and the second element is a list of ham files.\n\n Output\n ------\n probabilities_by_category: A two-element tuple. The first element is a dict \n whose keys are words, and whose values are the smoothed estimates of p_d;\n the second element is a dict whose keys are words, and whose values are the \n smoothed estimates of q_d \n \"\"\"\n ### TODO: Write your code here\n\n\n spam_list = file_lists_by_category[0]\n ham_list = file_lists_by_category[1]\n\n spam_counts = util.get_counts(spam_list)\n num_spam_words = len(spam_counts)\n\n ham_counts = util.get_counts(ham_list)\n num_ham_words = len(ham_counts)\n \n D = len(spam_counts.keys() & ham_counts.keys())\n\n p_d = dict()\n q_d = dict()\n\n for word in spam_counts:\n p_d[word] = (spam_counts[word] + 1)/(num_spam_words + D)\n\n p_d[\"default val\"] = 1/(num_spam_words + D)\n \n for word in ham_counts:\n q_d[word] = (ham_counts[word] + 1)/(num_ham_words+ D)\n\n q_d[\"default val\"] = 1/(num_ham_words + D)\n\n probabilities_by_category = (p_d, q_d)\n\n return probabilities_by_category\n\ndef classify_new_email(filename,probabilities_by_category,prior_by_category):\n \"\"\"\n Use Naive Bayes classification to classify the email in the given file.\n\n Inputs\n ------\n filename: name of the file to be classified\n probabilities_by_category: output of function learn_distributions\n prior_by_category: A two-element list as [\\pi, 1-\\pi], where \\pi is the \n parameter in the prior class distribution\n\n Output\n ------\n classify_result: A two-element tuple. The first element is a string whose value\n is either 'spam' or 'ham' depending on the classification result, and the \n second element is a two-element list as [log p(y=1|x), log p(y=0|x)], \n representing the log posterior probabilities\n \"\"\"\n ### TODO: Write your code here\n\n\n file_list = []\n file_list.append(filename)\n\n P_spam = np.math.log(prior_by_category[0])\n P_ham = np.math.log(prior_by_category[1])\n\n p_d = probabilities_by_category[0]\n q_d = probabilities_by_category[1]\n\n X_n = util.get_word_freq(file_list)\n\n spam_predictor = P_spam\n ham_predictor = P_ham\n\n for word in X_n:\n spam_predictor += X_n[word]*np.math.log(p_d.get(word, p_d[\"default val\"]))\n ham_predictor += X_n[word]*np.math.log(q_d.get(word, q_d[\"default val\"]))\n\n if spam_predictor > ham_predictor:\n result = \"spam\"\n else:\n result = \"ham\"\n\n classify_result = (result, [spam_predictor, ham_predictor])\n \n return classify_result\n\ndef classify_new_email_mod(filename, probabilities_by_category, prior_by_category, zeta):\n file_list = []\n file_list.append(filename)\n\n p_d = probabilities_by_category[0]\n q_d = probabilities_by_category[1]\n\n X_n = util.get_word_freq(file_list)\n\n spam_predictor = 0\n ham_predictor = 0\n\n for word in X_n:\n spam_predictor += X_n[word]*np.math.log(p_d.get(word, p_d[\"default val\"]))\n ham_predictor += X_n[word]*np.math.log(q_d.get(word, q_d[\"default val\"]))\n\n if spam_predictor - ham_predictor > np.math.log(zeta):\n result = \"spam\"\n else:\n result = \"ham\"\n\n return result\n\nif __name__ == '__main__':\n \n # folder for training and testing \n spam_folder = \"data/spam\"\n ham_folder = \"data/ham\"\n test_folder = \"data/testing\"\n\n # generate the file lists for training\n file_lists = []\n for folder in (spam_folder, ham_folder):\n file_lists.append(util.get_files_in_folder(folder))\n \n # Learn the 
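# -- Illustrative aside (hypothetical helper, not this module's API) ---------
# The add-one (Laplace) smoothing computed in learn_distributions above:
# each word count is incremented by 1 and the denominator grows by the
# vocabulary size D, so words never seen in one class still get a small
# nonzero probability.
def smoothed_prob(count, total, vocab_size):
    return (count + 1) / (total + vocab_size)

# e.g. an unseen word with total = 10000 and D = 2500 gets 1/12500, which is
# exactly the 'default val' entry stored in p_d and q_d above.
# -----------------------------------------------------------------------------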
distributions \n\n probabilities_by_category = learn_distributions(file_lists)\n \n # prior class distribution\n priors_by_category = [0.5, 0.5]\n \n # Store the classification results\n performance_measures = np.zeros([2,2])\n # explanation of performance_measures:\n # columns and rows are indexed by 0 = 'spam' and 1 = 'ham'\n # rows correspond to true label, columns correspond to guessed label\n # to be more clear, performance_measures = [[p1 p2]\n # [p3 p4]]\n # p1 = Number of emails whose true label is 'spam' and classified as 'spam' \n # p2 = Number of emails whose true label is 'spam' and classified as 'ham' \n # p3 = Number of emails whose true label is 'ham' and classified as 'spam' \n # p4 = Number of emails whose true label is 'ham' and classified as 'ham' \n\n # Classify emails from testing set and measure the performance\n for filename in (util.get_files_in_folder(test_folder)):\n # Classify\n label,log_posterior = classify_new_email(filename,\n probabilities_by_category,\n priors_by_category)\n \n # Measure performance (the filename indicates the true label)\n base = os.path.basename(filename)\n true_index = ('ham' in base) \n guessed_index = (label == 'ham')\n performance_measures[int(true_index), int(guessed_index)] += 1\n\n template=\"You correctly classified %d out of %d spam emails, and %d out of %d ham emails.\"\n # Correct counts are on the diagonal\n correct = np.diag(performance_measures)\n # totals are obtained by summing across guessed labels\n totals = np.sum(performance_measures, 1)\n print(template % (correct[0],totals[0],correct[1],totals[1]))\n \n \n ### TODO: Write your code here to modify the decision rule such that\n ### Type 1 and Type 2 errors can be traded off, plot the trade-off curve\n\n zeta_vals = [10 ** (-150), 10 ** (-10), 10 ** (-9), 10 ** (-8), 10 ** (-7), 10 ** (-6), 10 ** (-5), 10 ** (-4), 10 ** (-3), 10 ** (-2), 10 ** (-1), 10 ** 0, 10 ** 1, 10 ** 3, 10 ** 4, 10 ** 5, 10 ** 6, 10 ** 7, 10 ** 8, 10 ** 9, 10 ** 10, 10 ** 20]\n\n type1 = []\n type2 = []\n\n for zeta in zeta_vals: \n performance_measures = np.zeros([2,2])\n for filename in (util.get_files_in_folder(test_folder)):\n # Classify\n label = classify_new_email_mod(filename,\n probabilities_by_category,\n priors_by_category, zeta)\n\n # Measure performance (the filename indicates the true label)\n base = os.path.basename(filename)\n true_index = ('ham' in base) \n guessed_index = (label == 'ham')\n performance_measures[int(true_index), int(guessed_index)] += 1\n \n # Correct counts are on the diagonal\n totals = np.sum(performance_measures, 1)\n correct = np.diag(performance_measures)\n type1.append(totals[0] - correct[0])\n type2.append(totals[1] - correct[1])\n \n plt.scatter(type1, type2)\n plt.xlabel(\"Number of Type 1 Errors\")\n plt.ylabel(\"Number of Type 2 Errors\")\n plt.title(\"Variation of Type 1 and Type 2 Errors as the Parameter Zeta Changes\")\n plt.show()\n","sub_path":"Lab1/classifier/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":7276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"96210751","text":"from os import path\nfrom wordcloud import WordCloud, STOPWORDS\nimport matplotlib.pyplot as plt\nimport random\nimport os\nfrom PIL import Image\nimport numpy as np\n\n\nfont = 'DroidSansFallbackFull.ttf' #使用的字体\n\ndef gray_color_func(word, font_size, position, orientation, random_state=None, **kwargs):\n return \"hsl(0, 0%%, %d%%)\" % random.randint(60, 100)\n\n\nd = 
path.dirname(__file__)\n\n\nmask = np.array(Image.open(path.join(d, \"stormtrooper_mask.png\"))) #背景图\n \n\ntext = open(path.join(d, u'santi2.txt')).read() #将文件内容读取\n\ntext = text.replace(\"程心说\", \"程心\") \ntext = text.replace(\"程心和\", \"程心\")\ntext = text.replace(\"程心问\", \"程心\")\n\nstopwords = set(STOPWORDS)\nstopwords.add(\"int\")\nstopwords.add(\"ext\")\n\n# Generate a word cloud image\n\nwc = WordCloud(font_path=font, max_words=2000, mask=mask, stopwords=stopwords, margin=10, random_state=1).generate(text)\n \ndefault_colors = wc.to_array()\nplt.title(\"Custom colors\") #图片标题\nplt.imshow(wc.recolor(color_func=gray_color_func, random_state=3))\nwc.to_file(\"a_new_hope.png\") #保存文件\nplt.axis(\"off\")\nplt.figure()\n\nplt.title(\"三体-词频统计\")\nplt.imshow(default_colors)\nplt.axis(\"off\")\nplt.show()\n\n\n","sub_path":"santi/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"346758102","text":"def crc(event, context):\r\n \"\"\" Returns a CRC (Challenge Response Check) to keep this webhook\r\n secure. https://goo.gl/kFdJgV for more details.\r\n Also takes account activity events from twitter and sends an sms notification if SEND_NOTIFICATIONS is True\"\"\"\r\n # Short circuit ping from CloudWatch Events\r\n if event.get('source', None) == 'aws.events':\r\n print('ping')\r\n return\r\n\r\n # If request is a GET method, handle CRC request as documented by Twitter\r\n if event['httpMethod'] == 'GET':\r\n import base64\r\n import hmac\r\n import hashlib\r\n import os\r\n import json\r\n\r\n print(str(event))\r\n print('Calculating CRC')\r\n crc = event['queryStringParameters']['crc_token']\r\n sha256_hash_digest = hmac.new(\r\n os.environ['API_SECRET'].encode('utf-8'), msg=crc.encode('utf-8'),\r\n digestmod=hashlib.sha256).digest()\r\n\r\n body = json.dumps({'response_token': 'sha256=' +\r\n base64.b64encode(sha256_hash_digest).decode('utf-8')})\r\n print('Body response: {}'.format(body))\r\n response = {\r\n 'statusCode': 200,\r\n 'body': body\r\n }\r\n return response\r\n\r\n # If request is POST, handle account activity event from twitter\r\n if event['httpMethod'] == 'POST':\r\n import os\r\n import json\r\n\r\n print(\"EVENT!!!!\" + str(event))\r\n body = json.loads(event['body'])\r\n\r\n # If we want to forward notifications and this is a direct message event\r\n if os.environ['SEND_NOTIFICATIONS'] == 'True' and 'direct_message_events' in body:\r\n\r\n # If the sender of the direct message is not me, forward notification\r\n sender_id = body['direct_message_events'][0]['message_create']['sender_id']\r\n if sender_id != os.environ['MY_TWITTER_ID']:\r\n\r\n # importing requests takes a while (>3 seconds may cause timeout fail or increased AWS costs)\r\n # requests a better alternative to twilio.rest.Client. 
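# -- Illustrative aside --------------------------------------------------
# The CRC handshake in the GET branch above, condensed into one function:
# HMAC-SHA256 of the crc_token keyed with the consumer API secret, then
# base64-encoded and prefixed with 'sha256=' as Twitter's webhook
# documentation requires.
def crc_response_sketch(api_secret, crc_token):
    import base64, hashlib, hmac
    digest = hmac.new(api_secret.encode('utf-8'),
                      msg=crc_token.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    return 'sha256=' + base64.b64encode(digest).decode('utf-8')
# --------------------------------------------------------------------------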
smaller package size for Lambda\r\n import requests\r\n dm = \"@\" + body['users'][sender_id]['screen_name'] + \": \"\r\n dm += body['direct_message_events'][0]['message_create']['message_data']['text']\r\n # Use twilio to send an sms notification\r\n print (\"HANDLING FOR TWILIO: \" + dm)\r\n response = requests.post(\r\n \"https://api.twilio.com/2010-04-01/Accounts/\"+os.environ['TWILIO_ACCOUNT_SID']+\"/Messages.json\",\r\n headers={\"Content-Type\": \"application/x-www-form-urlencoded\"\r\n },\r\n data={\r\n \"From\": os.environ['TWILIO_NUMBER'],\r\n \"To\": os.environ['MY_NUMBER'],\r\n \"Body\": dm\r\n },\r\n auth=(os.environ['TWILIO_ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\r\n )\r\n\r\n print(\"SEND UPDATE STATUS: \" + response.status)\r\n\r\n content = \"direct message event received\"\r\n\r\n else:\r\n print(\"EVENT RECEIVED AND IGNORED\")\r\n content = \"no\"\r\n\r\n return {\r\n 'statusCode': 200,\r\n 'body': content\r\n }\r\n","sub_path":"twitter-events/twitter_events.py","file_name":"twitter_events.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"559192252","text":"import os\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\nfrom sklearn import metrics\nfrom tensorboardX import SummaryWriter\n\nimport bertTransformer.distributed as distributed\nfrom bertTransformer.models.data_loader import get_minibatches\n# import onmt\nfrom bertTransformer.models.reporter import ReportMgr\nfrom bertTransformer.models.stats import Statistics\nfrom bertTransformer.others.logging import logger\nfrom bertTransformer.others.utils import test_rouge, rouge_results_to_str\nfrom bertTransformer.evaluate import predict_vote\n\n\ndef _tally_parameters(model):\n\tn_params = sum([p.nelement() for p in model.parameters()])\n\treturn n_params\n\n\ndef build_trainer(args, device_id, model,\n\t\t\t\t optim):\n\t\"\"\"\n Simplify `Trainer` creation based on user `opt`s*\n Args:\n opt (:obj:`Namespace`): user options (usually from argument parsing)\n model (:obj:`onmt.models.NMTModel`): the model to train\n fields (dict): dict of fields\n optim (:obj:`onmt.utils.Optimizer`): optimizer used during training\n data_type (str): string describing the type of data\n e.g. 
\"text\", \"img\", \"audio\"\n model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object\n used to save the model\n \"\"\"\n\t# device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n\n\tgrad_accum_count = args.accum_count\n\tn_gpu = args.world_size\n\n\t# if device_id >= 0: # != 'cpu': # >= 0:\n\t# \tgpu_rank = int(args.gpu_ranks)\n\t# else:\n\tgpu_rank = 0\n\tn_gpu = 0\n\n\tprint('gpu_rank %d' % gpu_rank)\n\n\ttensorboard_log_dir = args.model_path\n\n\twriter = SummaryWriter(tensorboard_log_dir, comment=\"Unmt\")\n\n\treport_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)\n\n\ttrainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)\n\n\t# print(tr)\n\tif (model):\n\t\tn_params = _tally_parameters(model)\n\t\tlogger.info('* number of parameters: %d' % n_params)\n\n\treturn trainer\n\n\nclass Trainer(object):\n\t\"\"\"\n Class that controls the training process.\n Args:\n model(:py:class:`onmt.models.model.NMTModel`): translation model\n to train\n train_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):\n training loss computation\n optim(:obj:`onmt.utils.optimizers.Optimizer`):\n the optimizer responsible for update\n trunc_size(int): length of truncated back propagation through time\n shard_size(int): compute loss in shards of this size for efficiency\n data_type(string): type of the source input: [text|img|audio]\n norm_method(string): normalization methods: [sents|tokens]\n grad_accum_count(int): accumulate gradients this many times.\n report_manager(:obj:`onmt.utils.ReportMgrBase`):\n the object that creates reports, or None\n model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is\n used to save a checkpoint.\n Thus nothing will be saved if this parameter is None\n \"\"\"\n\n\tdef __init__(self, args, model, optim,\n\t\t\t\t grad_accum_count=1, n_gpu=1, gpu_rank=1,\n\t\t\t\t report_manager=None):\n\t\t# Basic attributes.\n\t\tself.args = args\n\t\tself.check_steps = args.check_steps\n\t\tself.model = model\n\t\tself.optim = optim\n\t\tself.grad_accum_count = grad_accum_count\n\t\tself.n_gpu = n_gpu\n\t\tself.gpu_rank = gpu_rank\n\t\tself.report_manager = report_manager\n\t\tself.best_acc = 0.\n\t\tself.loss = torch.nn.CrossEntropyLoss() # torch.nn.BCELoss(reduction='none')\n\t\tassert grad_accum_count > 0\n\t\t# Set model in training mode.\n\t\tif (model):\n\t\t\tself.model.train()\n\n\tdef train(self, train_dataset, device): # , valid_iter_fct=None, valid_steps=-1)\n\t\t\"\"\"\n The main training loops.\n by iterating over training data (i.e. `train_iter_fct`)\n and running validation (i.e. iterating over `valid_iter_fct`\n Args:\n train_iter_fct(function): a function that returns the train\n iterator. e.g. 
something like\n train_iter_fct = lambda: generator(*args, **kwargs)\n valid_iter_fct(function): same as train_iter_fct, for valid data\n train_steps(int):\n valid_steps(int):\n save_checkpoint_steps(int):\n Return:\n None\n \"\"\"\n\t\t# step = self.optim._step + 1\n\t\t# step = self.optim._step + 1\n\t\t# epoch = 0\n\t\ttrue_batchs = []\n\t\taccum = 0\n\t\tnormalization = 0\n\t\t# train_iter = train_iter_fct()\n\n\t\ttotal_stats = Statistics()\n\t\treport_stats = Statistics()\n\t\tself._start_report_manager(start_time=total_stats.start_time)\n\t\tif self.args.do_eval:\n\t\t\ttest_dataset = torch.load(self.args.bert_data_path + 'test.data')\n\t\t\tlogger.info('Loading test dataset from %s, number of examples: %d' %\n\t\t\t (self.args.bert_data_path, len(test_dataset)))\n\t\t\ttest_dataloader = DataLoader(dataset=test_dataset, batch_size=self.args.batch_size, shuffle=False)\n\t\t\tif self.args.do_use_second_dataset:\n\t\t\t\ttest_dataset2 = torch.load(self.args.second_dataset_path + 'test.data')\n\t\t\t\ttest_dataloader2 = DataLoader(dataset=test_dataset2, batch_size=self.args.batch_size, shuffle=False)\n\t\tfor epoch in range(self.args.train_epochs):\n\t\t\tn_correct, n_total = 0., 0.\n\t\t\treduce_counter = 0\n\t\t\tloss_total = 0\n\n\t\t\tlogger.info('Getting minibatches')\n\t\t\tmini_batches = get_minibatches(train_dataset, self.args.batch_size, self.args.max_seq_length)\n\t\t\tlogger.info('Number of minibatches: %s' % (len(train_dataset) // self.args.batch_size))\n\t\t\tlogger.info('Start training...')\n\t\t\tfor step, batch in enumerate(mini_batches):\n\t\t\t\t# if self.n_gpu == 0 or (step % self.n_gpu == self.gpu_rank):\n\t\t\t\tself.optim.zero_grad()\n\t\t\t\t\t# true_batchs.append(batch)\n\t\t\t\t\t# normalization += batch.batch_size\n\t\t\t\t\t# accum += 1\n\t\t\t\t\t# if accum == self.grad_accum_count:\n\t\t\t\t\t# \treduce_counter += 1\n\t\t\t\t\t# \tif self.n_gpu > 1:\n\t\t\t\t\t# \t\tnormalization = sum(distributed.all_gather_list(normalization))\n\t\t\t\tsrc, labels, segs, clss = batch[0], batch[1], batch[2], batch[3]\n\t\t\t\tif torch.cuda.is_available():\n\t\t\t\t\tsrc = torch.cuda.LongTensor(src).to(device) # .reshape(-1, self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.cuda.LongTensor(labels).to(device) # .reshape(1, -1)\n\t\t\t\t\tsegs = torch.cuda.LongTensor(segs).to(device) # .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.cuda.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.cuda.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.cuda.ByteTensor((1 - (clss == -1)))\n\t\t\t\telse:\n\t\t\t\t\tsrc = torch.LongTensor(src).to(device) # .reshape(-1, self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.LongTensor(labels).to(device) # .reshape(1, -1)\n\t\t\t\t\tsegs = torch.LongTensor(segs).to(device) # .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.ByteTensor((1 - (clss == -1))) # torch.ByteTensor(mask_cls).to(device)\n\n\t\t\t\t'''src, labels, segs, clss = batch['src'], batch['labels'], batch['segs'], batch['clss']\n\t\t\t\tif torch.cuda.is_available():\n\t\t\t\t\tsrc = torch.cuda.LongTensor([t for t in src]).to(device) # .reshape(-1, self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.cuda.LongTensor(labels).to(device) # .reshape(1, -1)\n\t\t\t\t\tsegs = 
torch.cuda.LongTensor(segs).to(device) # .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.cuda.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.cuda.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.cuda.ByteTensor((1 - (clss == -1)))\n\t\t\t\telse:\n\t\t\t\t\tsrc = torch.LongTensor(src).to(device) \t\t# .reshape(-1, self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.LongTensor(labels).to(device) \t# .reshape(1, -1)\n\t\t\t\t\tsegs = torch.LongTensor(segs).to(device)\t\t# .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.ByteTensor((1 - (clss == -1))) # torch.ByteTensor(mask_cls).to(device)'''\n\n\t\t\t\t# src = batch.src\n\t\t\t\t# labels = batch.labels\n\t\t\t\t# segs = batch.segs\n\t\t\t\t# clss = batch.clss\n\t\t\t\t# mask = batch.mask\n\t\t\t\t# mask_cls = batch.mask_cls\n\n\t\t\t\tlogits = self.model(src, segs, clss, mask, mask_cls) # , mask\n\n\t\t\t\tloss = self.loss(logits, labels)\n\t\t\t\tn_correct += (torch.argmax(logits, -1) == labels).sum().item()\n\t\t\t\tn_total += len(logits)\n\t\t\t\tloss_total += loss.item() * len(logits)\n\t\t\t\t# loss = (loss * mask.float()).sum()\n\t\t\t\t# (loss / loss.numel()).backward()\n\t\t\t\tloss.backward()\n\t\t\t\t# loss.div(float(normalization)).backward()\n\t\t\t\t# 4. Update the parameters and statistics.\n\t\t\t\t# if self.grad_accum_count == 1:\n\t\t\t\t# Multi GPU gradient gather\n\t\t\t\tif self.n_gpu > 1:\n\t\t\t\t\tgrads = [p.grad.data for p in self.model.parameters()\n\t\t\t\t\t\t\t if p.requires_grad\n\t\t\t\t\t\t\t and p.grad is not None]\n\t\t\t\t\tdistributed.all_reduce_and_rescale_tensors(\n\t\t\t\t\t\tgrads, float(1))\n\t\t\t\tself.optim.step()\n\n\t\t\t\tbatch_stats = Statistics(float(loss.cpu().item()), normalization)\n\t\t\t\ttotal_stats.update(batch_stats)\n\t\t\t\treport_stats.update(batch_stats)\n\n\t\t\t\tlogger.info('step-{}, loss:{:.4f}, acc:{:.4f}'.format(step, loss_total / n_total, n_correct / n_total))\n\t\t\t\tif step % self.check_steps == 0 or step == batch_num:\n\t\t\t\t\tvalid_acc_2 = 0\n\t\t\t\t\tvalid_acc = self.test(self.model, test_dataloader, device)\n\t\t\t\t\tif self.args.do_use_second_dataset:\n\t\t\t\t\t\tvalid_acc_2 = self.test(self.model, test_dataloader2, device)\n\t\t\t\t\tif valid_acc > self.best_acc or valid_acc_2 > self.best_acc:\n\t\t\t\t\t\tself.best_acc = valid_acc\n\t\t\t\t\t\tself._save(str(self.args.model_name)+str(self.args.lr)+'valid', epoch, self.best_acc)\n\t\t\t\t# \tself._save(epoch, step)\n\t\t\t\t# report_stats = self._maybe_report_training(step, epoch, self.optim.learning_rate, report_stats)\n\n\t\t\t\t# in case of multi step gradient accumulation,\n\t\t\t\t# update only after accum batches\n\t\t\t# valid_acc = self.test(self.model, test_dataset, device)\n\t\t\t# if valid_acc > self.best_acc:\n\t\t\t# \tself.best_acc = valid_acc\n\t\t\t# self._save(str(self.args.model_name)+str(self.args.lr), epoch, valid_acc)\n\t\t\tif self.grad_accum_count > 1:\n\t\t\t\tif self.n_gpu > 1:\n\t\t\t\t\tgrads = [p.grad.data for p in self.model.parameters()\n\t\t\t\t\t\t\t if p.requires_grad\n\t\t\t\t\t\t\t and p.grad is not None]\n\t\t\t\t\tdistributed.all_reduce_and_rescale_tensors(\n\t\t\t\t\t\tgrads, float(1))\n\t\t\t\tself.optim.step()\n\n\t\t\t# return n_correct, n_total, 
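# -- Illustrative aside ----------------------------------------------------
# The clss padding and mask construction repeated in the loops above, in
# isolation with made-up data: [CLS] positions are right-padded with -1 to
# the widest example in the batch, and the mask flags the real positions.
# A boolean comparison is used here instead of the ByteTensor(1 - ...)
# idiom above; the effect is the same. (torch is already imported in this
# module.)
def pad_clss_sketch(clss):
	width = max(len(c) for c in clss)
	padded = torch.LongTensor([c + [-1] * (width - len(c)) for c in clss])
	return padded, padded != -1

# pad_clss_sketch([[0, 14, 30], [0, 9]]) ->
#   tensor([[ 0, 14, 30], [ 0,  9, -1]]) and mask [[T, T, T], [T, T, F]]
# ----------------------------------------------------------------------------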
loss_total\n\n\t\t\tif self.args.do_eval:\n\t\t\t\t# model = trainer.model\n\t\t\t\t# self.model.eval()\n\t\t\t\t# trainer = build_trainer(args, device_id, model, None)\n\t\t\t\ttry:\n\t\t\t\t\tself.test(self.model, test_dataset, device)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlogger.error(e)\n\t\t\t\t# true_batchs = []\n\t\t\t\t# accum = 0\n\t\t\t\t# normalization = 0\n\t\t\t\t# step += 1\n\t\t\t\t# if step > train_steps:\n\t\t\t\t# \tbreak\n\n\t\t# return total_stats\n\n\tdef validate(self, valid_dataset, device, epoch=0):\n\t\t\"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n\t\t# Set model in validating mode.\n\t\tself.model.eval()\n\t\tstats = Statistics()\n\n\t\twith torch.no_grad():\n\t\t\tmini_batches = get_minibatches(valid_dataset, self.args.batch_size, self.args.max_seq_length)\n\t\t\tlogger.info('Number of minibatches: %s' % (len(valid_dataset) // self.args.batch_size))\n\t\t\tfor step, batch in enumerate(mini_batches):\n\t\t\t\tsrc, labels, segs, clss = batch[0], batch[1], batch[2], batch[3]\n\t\t\t\tif torch.cuda.is_available():\n\t\t\t\t\tsrc = torch.cuda.LongTensor(src).to(device) # .reshape(-1, self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.cuda.LongTensor(labels).to(device) # .reshape(1, -1)\n\t\t\t\t\tsegs = torch.cuda.LongTensor(segs).to(device) # .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.cuda.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.cuda.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.cuda.ByteTensor((1 - (clss == -1)))\n\t\t\t\telse:\n\t\t\t\t\tsrc = torch.LongTensor(src).to(device) # .reshape(-1, self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.LongTensor(labels).to(device) # .reshape(1, -1)\n\t\t\t\t\tsegs = torch.LongTensor(segs).to(device) # .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.ByteTensor((1 - (clss == -1))) # torch.ByteTensor(mask_cls).to(device)\n\n\t\t\t\tlogits = self.model(src, segs, clss, mask, mask_cls) # , mask\n\n\t\t\t\tloss = self.loss(logits, labels)\n\t\t\t\t# loss = (loss * mask.float()).sum()\n\t\t\t\tbatch_stats = Statistics(float(loss.cpu().item()), len(labels))\n\t\t\t\tstats.update(batch_stats)\n\t\t\tself._report_step(0, epoch, valid_stats=stats)\n\t\t\treturn stats\n\n\tdef test(self, model, test_dataloader, device, cal_lead=False, cal_oracle=False):\n\t\t\"\"\" Validate model.\n valid_iter: validate data iterator\n Returns:\n :obj:`nmt.Statistics`: validation loss statistics\n \"\"\"\n\t\tmodel.eval()\n\t\tstats = Statistics()\n\t\tbatch_num = len(test_dataloader)\n\t\t# logger.info('Number of minibatches: %s' % batch_num)\n\t\tmini_batches = get_minibatches(test_dataset, self.args.batch_size, self.args.max_seq_length, shuffle=False)\n\t\tlogger.info('Number of minibatches: %s' % len(test_dataloader))\n\t\twith torch.no_grad():\n\t\t\tn_correct = 0.\n\t\t\tn_total = 0.\n\t\t\ttarget_all = None\n\t\t\toutput_all = None\n\t\t\tfull_pred = []\n\t\t\tfull_label_ids = []\n\t\t\tfor step, batch in enumerate(mini_batches):\n\t\t\t\tsrc, labels, segs, clss = batch[0], batch[1], batch[2], batch[3]\n\t\t\t\tif torch.cuda.is_available():\n\t\t\t\t\tsrc = torch.cuda.LongTensor(src).to(device) # .reshape(-1, 
self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.cuda.LongTensor(labels).to(device) # .reshape(1, -1)\n\t\t\t\t\tsegs = torch.cuda.LongTensor(segs).to(device) # .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.cuda.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.cuda.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.cuda.ByteTensor((1 - (clss == -1)))\n\t\t\t\telse:\n\t\t\t\t\tsrc = torch.LongTensor(src).to(device) # .reshape(-1, self.args.max_seq_length)\n\t\t\t\t\tlabels = torch.LongTensor(labels).to(device) # .reshape(1, -1)\n\t\t\t\t\tsegs = torch.LongTensor(segs).to(device) # .reshape(1, -1)\n\n\t\t\t\t\tclss = [(cls + [-1] * (max([len(i) for i in clss]) - len(cls))) for cls in clss]\n\t\t\t\t\tclss = torch.LongTensor(clss).to(device)\n\t\t\t\t\tmask = torch.ByteTensor((1 - (src == 0))).to(device)\n\t\t\t\t\tmask_cls = torch.ByteTensor((1 - (clss == -1))) # torch.ByteTensor(mask_cls).to(device)\n\n\t\t\t\tlogits = self.model(src, segs, clss, mask, mask_cls) # , mask\n\t\t\t\t# loss = self.loss(logits, labels)\n\t\t\t\tn_correct += (torch.argmax(logits, -1) == labels).sum().item()\n\t\t\t\tn_total += len(logits)\n\t\t\t\tfull_pred.extend(torch.argmax(logits, -1).tolist())\n\t\t\t\tfull_label_ids.extend(labels.tolist())\n\n\t\t\t\tif target_all is None:\n\t\t\t\t\ttarget_all = labels\n\t\t\t\t\toutput_all = logits\n\t\t\t\telse:\n\t\t\t\t\ttarget_all = torch.cat((target_all, labels), dim=0)\n\t\t\t\t\toutput_all = torch.cat((output_all, logits), dim=0)\n\n\t\t\t\t# batch_stats = Statistics(float(loss.cpu().item()), len(labels))\n\t\t\t\t# stats.update(batch_stats)\n\n\t\t\t\t# sent_scores = sent_scores + mask.float()\n\t\t\t\t# sent_scores = sent_scores.cpu().data.numpy()\n\t\t\t\t# selected_ids = np.argsort(-sent_scores, 1)\n\t\t\tacc = n_correct / n_total\n\t\t\tpred_res = metrics.classification_report(target_all.cpu(), torch.argmax(output_all, -1).cpu(),\n\t\t\t\t\t\t\t\t\t\t\t\t target_names=['NEG', 'NEU', 'POS'])\n\t\t\tlogger.info('Prediction results: \\n{}'.format(pred_res))\n\n\t\t\tpredict_vote(full_pred, full_label_ids, test_dataloader)\n\t\t\t# self._report_step(0, step, valid_stats=stats)\n\t\treturn acc\n\n\t\tdef orig():\n\t\t\t# Set model in validating mode.\n\t\t\tdef _get_ngrams(n, text):\n\t\t\t\tngram_set = set()\n\t\t\t\ttext_length = len(text)\n\t\t\t\tmax_index_ngram_start = text_length - n\n\t\t\t\tfor i in range(max_index_ngram_start + 1):\n\t\t\t\t\tngram_set.add(tuple(text[i:i + n]))\n\t\t\t\treturn ngram_set\n\n\t\t\tdef _block_tri(c, p):\n\t\t\t\ttri_c = _get_ngrams(3, c.split())\n\t\t\t\tfor s in p:\n\t\t\t\t\ttri_s = _get_ngrams(3, s.split())\n\t\t\t\t\tif len(tri_c.intersection(tri_s)) > 0:\n\t\t\t\t\t\treturn True\n\t\t\t\treturn False\n\n\t\t\tif not cal_lead and not cal_oracle:\n\t\t\t\tmodel.eval()\n\t\t\tstats = Statistics()\n\n\t\t\tcan_path = '%s_step%d.candidate' % (self.args.result_path, step)\n\t\t\tgold_path = '%s_step%d.gold' % (self.args.result_path, step)\n\t\t\twith open(can_path, 'w') as save_pred:\n\t\t\t\twith open(gold_path, 'w') as save_gold:\n\t\t\t\t\twith torch.no_grad():\n\t\t\t\t\t\ttarget_all = []\n\t\t\t\t\t\toutput_all = []\n\t\t\t\t\t\t# n_correct, n_total = 0., 0.\n\t\t\t\t\t\tmini_batches = get_minibatches(test_dataset, self.args.batch_size, self.args.max_seq_length)\n\t\t\t\t\t\tfor i, batch in enumerate(mini_batches):\n\t\t\t\t\t\t\tsrc = batch.src\n\t\t\t\t\t\t\tlabels = batch.labels\n\t\t\t\t\t\t\tsegs = 
batch.segs\n\t\t\t\t\t\t\tclss = batch.clss\n\t\t\t\t\t\t\tmask = batch.mask\n\t\t\t\t\t\t\tmask_cls = batch.mask_cls\n\n\t\t\t\t\t\t\tgold = []\n\t\t\t\t\t\t\tpred = []\n\n\t\t\t\t\t\t\tif (cal_lead):\n\t\t\t\t\t\t\t\tselected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size\n\t\t\t\t\t\t\telif (cal_oracle):\n\t\t\t\t\t\t\t\tselected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in\n\t\t\t\t\t\t\t\t\t\t\t\trange(batch.batch_size)]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlogits = model(src, segs, clss, mask, mask_cls)\n\n\t\t\t\t\t\t\t\tloss = self.loss(logits, labels) # loss = self.loss(sent_scores, labels.float())\n\t\t\t\t\t\t\t\t# loss = (loss * mask.float()).sum()\n\t\t\t\t\t\t\t\t# n_correct += (torch.argmax(logits, -1) == labels).sum().item()\n\t\t\t\t\t\t\t\t# n_total += len(logits)\n\t\t\t\t\t\t\t\tif target_all is None:\n\t\t\t\t\t\t\t\t\ttarget_all = labels\n\t\t\t\t\t\t\t\t\toutput_all = logits\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\ttarget_all = torch.cat((target_all, labels), dim=0)\n\t\t\t\t\t\t\t\t\toutput_all = torch.cat((output_all, logits), dim=0)\n\n\t\t\t\t\t\t\t\tbatch_stats = Statistics(float(loss.cpu().item()), len(labels))\n\t\t\t\t\t\t\t\tstats.update(batch_stats)\n\n\t\t\t\t\t\t\t\tsent_scores = sent_scores + mask.float()\n\t\t\t\t\t\t\t\tsent_scores = sent_scores.cpu().data.numpy()\n\t\t\t\t\t\t\t\tselected_ids = np.argsort(-sent_scores, 1)\n\t\t\t\t\t\t\t# selected_ids = np.sort(selected_ids,1)\n\t\t\t\t\t\t\tfor i, idx in enumerate(selected_ids):\n\t\t\t\t\t\t\t\t_pred = []\n\t\t\t\t\t\t\t\tif len(batch.src_str[i]) == 0:\n\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\tfor j in selected_ids[i][:len(batch.src_str[i])]:\n\t\t\t\t\t\t\t\t\tif j >= len(batch.src_str[i]):\n\t\t\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t\t\tcandidate = batch.src_str[i][j].strip()\n\t\t\t\t\t\t\t\t\tif self.args.block_trigram:\n\t\t\t\t\t\t\t\t\t\tif not _block_tri(candidate, _pred):\n\t\t\t\t\t\t\t\t\t\t\t_pred.append(candidate)\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\t_pred.append(candidate)\n\n\t\t\t\t\t\t\t\t\tif (not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3:\n\t\t\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\t\t_pred = ''.join(_pred)\n\t\t\t\t\t\t\t\tif self.args.recall_eval:\n\t\t\t\t\t\t\t\t\t_pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])\n\n\t\t\t\t\t\t\t\tpred.append(_pred)\n\t\t\t\t\t\t\t\tgold.append(batch.tgt_str[i])\n\n\t\t\t\t\t\t\tfor i in range(len(gold)):\n\t\t\t\t\t\t\t\tsave_gold.write(gold[i].strip() + '\\n')\n\t\t\t\t\t\t\tfor i in range(len(pred)):\n\t\t\t\t\t\t\t\tsave_pred.write(pred[i].strip() + '\\n')\n\t\t\t\t\tpred_res = metrics.classification_report(target_all.cpu(), torch.argmax(output_all, -1).cpu(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t target_names=['NEG', 'NEU', 'POS'])\n\t\t\t\t\tlogger.info('Prediction results for test dataset: \\n{}'.format(pred_res))\n\t\t\tif step != -1 and self.args.report_rouge:\n\t\t\t\trouges = test_rouge(self.args.temp_dir, can_path, gold_path)\n\t\t\t\tlogger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n\t\t\tself._report_step(0, step, valid_stats=stats)\n\n\t\t\treturn stats\n\n\tdef _gradient_accumulation(self, true_batchs, normalization, total_stats,\n\t\t\t\t\t\t\t report_stats, n_correct, n_total):\n\t\tif self.grad_accum_count > 1:\n\t\t\tself.model.zero_grad()\n\t\tloss_total = 0.\n\t\tfor batch in true_batchs:\n\t\t\tif self.grad_accum_count == 1:\n\t\t\t\tself.model.zero_grad()\n\n\t\t\tsrc = batch.src\n\t\t\tlabels = 
batch.labels\n\t\t\tsegs = batch.segs\n\t\t\tclss = batch.clss\n\t\t\tmask = batch.mask\n\t\t\tmask_cls = batch.mask_cls\n\n\t\t\tlogits = self.model(src, segs, clss, mask, mask_cls) # , mask\n\n\t\t\tloss = self.loss(logits, labels)\n\t\t\tn_correct += (torch.argmax(logits, -1) == labels).sum().item()\n\t\t\tn_total += len(logits)\n\t\t\tloss_total += loss.item() * len(logits)\n\t\t\t# loss = (loss * mask.float()).sum()\n\t\t\t# (loss / loss.numel()).backward()\n\t\t\tloss.backward()\n\t\t\t# loss.div(float(normalization)).backward()\n\n\t\t\tbatch_stats = Statistics(float(loss.cpu().item()), normalization)\n\n\t\t\ttotal_stats.update(batch_stats)\n\t\t\treport_stats.update(batch_stats)\n\n\t\t\t# 4. Update the parameters and statistics.\n\t\t\tif self.grad_accum_count == 1:\n\t\t\t\t# Multi GPU gradient gather\n\t\t\t\tif self.n_gpu > 1:\n\t\t\t\t\tgrads = [p.grad.data for p in self.model.parameters()\n\t\t\t\t\t\t\t if p.requires_grad\n\t\t\t\t\t\t\t and p.grad is not None]\n\t\t\t\t\tdistributed.all_reduce_and_rescale_tensors(\n\t\t\t\t\t\tgrads, float(1))\n\t\t\t\tself.optim.step()\n\n\t\t# in case of multi step gradient accumulation,\n\t\t# update only after accum batches\n\t\tif self.grad_accum_count > 1:\n\t\t\tif self.n_gpu > 1:\n\t\t\t\tgrads = [p.grad.data for p in self.model.parameters()\n\t\t\t\t\t\t if p.requires_grad\n\t\t\t\t\t\t and p.grad is not None]\n\t\t\t\tdistributed.all_reduce_and_rescale_tensors(\n\t\t\t\t\tgrads, float(1))\n\t\t\tself.optim.step()\n\n\t\treturn n_correct, n_total, loss_total\n\n\tdef _save(self, model_name, epoch, acc):\n\t\treal_model = self.model\n\t\t# real_generator = (self.generator.module\n\t\t# if isinstance(self.generator, torch.nn.DataParallel)\n\t\t# else self.generator)\n\n\t\tmodel_state_dict = real_model.state_dict()\n\t\t# generator_state_dict = real_generator.state_dict()\n\t\tcheckpoint = {\n\t\t\t'model': model_state_dict,\n\t\t\t# 'generator': generator_state_dict,\n\t\t\t'opt': self.args,\n\t\t\t'optim': self.optim,\n\t\t}\n\t\tcheckpoint_path = os.path.join(self.args.model_path, 'model_{}_epoch_{}_acc_{:.4f}.pt'.format(model_name, epoch, acc))\n\t\tlogger.info(\"Saving checkpoint %s\" % checkpoint_path)\n\t\t# checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step)\n\t\tif not os.path.exists(checkpoint_path):\n\t\t\ttorch.save(checkpoint, checkpoint_path)\n\t\t\treturn checkpoint, checkpoint_path\n\n\tdef _start_report_manager(self, start_time=None):\n\t\t\"\"\"\n Simple function to start report manager (if any)\n \"\"\"\n\t\tif self.report_manager is not None:\n\t\t\tif start_time is None:\n\t\t\t\tself.report_manager.start()\n\t\t\telse:\n\t\t\t\tself.report_manager.start_time = start_time\n\n\tdef _maybe_gather_stats(self, stat):\n\t\t\"\"\"\n Gather statistics in multi-processes cases\n Args:\n stat(:obj:onmt.utils.Statistics): a Statistics object to gather\n or None (it returns None in this case)\n Returns:\n stat: the updated (or unchanged) stat object\n \"\"\"\n\t\tif stat is not None and self.n_gpu > 1:\n\t\t\treturn Statistics.all_gather_stats(stat)\n\t\treturn stat\n\n\tdef _maybe_report_training(self, step, num_steps, learning_rate,\n\t\t\t\t\t\t\t report_stats):\n\t\t\"\"\"\n Simple function to report training stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_training` for doc\n \"\"\"\n\t\tif self.report_manager is not None:\n\t\t\treturn self.report_manager.report_training(\n\t\t\t\tstep, num_steps, learning_rate, report_stats,\n\t\t\t\tmultigpu=self.n_gpu > 1)\n\n\tdef 
_report_step(self, learning_rate, step, train_stats=None,\n\t\t\t\t\t valid_stats=None):\n\t\t\"\"\"\n Simple function to report stats (if report_manager is set)\n see `onmt.utils.ReportManagerBase.report_step` for doc\n \"\"\"\n\t\tif self.report_manager is not None:\n\t\t\treturn self.report_manager.report_step(\n\t\t\t\tlearning_rate, step, train_stats=train_stats,\n\t\t\t\tvalid_stats=valid_stats)\n\n\tdef _maybe_save(self, step):\n\t\t\"\"\"\n Save the model if a model saver is set\n \"\"\"\n\t\tif self.model_saver is not None:\n\t\t\tself.model_saver.maybe_save(step)\n","sub_path":"BERT/bertTransformer/models/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":23895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"24965779","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport rospy\nfrom scipy.stats import multivariate_normal\nfrom geometry_msgs.msg import Point, PoseStamped\nfrom hanp_msgs.msg import TrackedHumans, TrackedHuman, TrackedSegmentType\nfrom hanp_prediction.msg import PredictedGoal\nfrom std_srvs.srv import SetBool, Trigger, TriggerResponse\nimport tf\nEPS = 1e-12\n\nclass PredictGoal(object):\n def __init__(self, human_num=1):\n self.human_num = human_num\n\n # Map_LAAS\n self.goals_x = [1.5, 7.0, 9.0, 10.5, 1.5, 10.3, 8.5]\n self.goals_y = [2.0, 8.0, 12.5, 15.0, 15.0, 1.5, -4.5]\n self.goal_num = 7\n\n #Map_new\n # self.goals_x = [1.5,1.5,1.5,1.5,1.5,7.5,25,42,42,41.5,42,37,22,15.5,28.5,37,23.5,10.5,15.5,31.5,20,25.5,7]\n # self.goals_y = [45,15,30,60,87,87,81.5,81.5,66,41.5,22,3,3,12.5,12.5,20.5,21.5,28.5,39.5,47,53,59,59]\n\n self.predicted_goal = PoseStamped()\n self.last_idx = 0\n self.changed = False\n self.current_poses = [[] for i in range(self.human_num)]\n self.prev_poses = [[] for i in range(self.human_num)]\n self.mv_nd = multivariate_normal(mean=0,cov=0.1)\n self.theta_phi = [[0]*self.goal_num for i in range(self.human_num)]\n self.window_size = 10\n self.probability_goal = [np.array([1.0/self.goal_num]*self.goal_num) for i in range(self.human_num)]\n self.probability_goal_window = [np.array([[1.0/self.goal_num]*self.goal_num]*self.window_size) for i in range(self.human_num)]\n self.done = False\n self.itr = 0\n\n NODE_NAME = \"human_goal_predict\"\n rospy.init_node(NODE_NAME)\n self.humans_sub_ = rospy.Subscriber(\"/tracked_humans\",TrackedHumans,self.tracked_humansCB)\n self.goal_pub_ = rospy.Publisher(NODE_NAME+\"/predicted_goal\",PredictedGoal, queue_size=2)\n self.goal_srv_ = rospy.Service(\"goal_changed\", Trigger, self.goal_changed)\n rospy.spin()\n\n def tracked_humansCB(self,msg):\n self.prev_poses = self.current_poses\n self.current_poses = [[] for i in range(self.human_num)]\n\n for human in msg.humans:\n for segment in human.segments:\n if segment.type == TrackedSegmentType.TORSO:\n # print((self.human_num))\n self.current_poses[human.track_id-1].append(segment.pose.pose)\n if not self.done:\n self.prev_poses = self.current_poses\n\n for i in range(0,len(self.current_poses[0])):\n # print(self.current_poses[0][i])\n diff = np.linalg.norm([self.current_poses[0][i].position.x - self.prev_poses[0][i].position.x, self.current_poses[0][i].position.y - self.prev_poses[0][i].position.y])\n\n if diff > EPS or not self.done:\n dist = []\n for j in range(0,len(self.goals_x)):\n # print(self.current_poses[i])\n vec1 = np.array([self.goals_x[j],self.goals_y[j],0.0]) - 
np.array([self.current_poses[0][i].position.x,self.current_poses[0][i].position.y,0.0]) #Vector from current position to a goal\n # print(self.current_poses[i][0].orientation)\n rotation = (self.current_poses[0][i].orientation.x,self.current_poses[0][i].orientation.y,self.current_poses[0][i].orientation.z,self.current_poses[0][i].orientation.w)\n roll,pitch,yaw = tf.transformations.euler_from_quaternion(rotation)\n unit_vec = np.array([np.cos(yaw), np.sin(yaw),0.0])\n self.theta_phi[i][j] = (np.arccos(np.dot(vec1,unit_vec)/np.linalg.norm(vec1)))\n dist.append(np.linalg.norm([self.current_poses[0][i].position.x - self.goals_x[j],self.current_poses[0][i].position.y - self.goals_y[j]]))\n\n self.probability_goal_window[i][self.itr] = self.mv_nd.pdf(np.array(self.theta_phi[i]));\n\n self.probability_goal[i] = np.array([1.0]*self.goal_num)\n for k in range(0,len(self.probability_goal_window[i])):\n gf = np.exp((k-self.window_size)/5)\n self.probability_goal[i] = np.power(self.probability_goal_window[i][k],gf)* np.array(self.probability_goal[i]) # Linear prediction of goal\n # print(self.probability_goal[i])\n\n for ln in range(0,len(self.goals_x)):\n self.probability_goal[i][ln] = (1/dist[ln])*self.probability_goal[i][ln];\n\n self.probability_goal[i] = (self.probability_goal[i]-np.min(self.probability_goal[i]))/(np.max(self.probability_goal[i])-np.min(self.probability_goal[i]))\n\n # print(sum(self.probability_goal[i]))\n\n self.itr = self.itr + 1\n if self.itr == self.window_size:\n self.itr = 0\n\n self.done = True\n\n self.predict_goal()\n\n\n def predict_goal(self):\n idx = 0\n max_prob = 0.0\n p_goal = PredictedGoal()\n\n for i in range(0,len(self.current_poses[0])):\n for j in range(0,len(self.goals_x)):\n if(max_prob 1:\n trusted_json_path = sys.argv[1]\n\ndef get_txt(domain):\n try:\n txt_data = json.loads(json.dumps(requests.get(\n 'https://dns-api.org/TXT/' + domain).json()))\n txt_value = txt_data[0]['value']\n if txt_value.find('NODE_ADDRESS') != -1:\n txt_address = txt_value.split('=')[1]\n return txt_address\n except:\n return ''\n\n\ndef get_address(url):\n try:\n response = requests.get(url)\n if not response.status_code // 100 == 2:\n return \"Error: Unexpected response {}\".format(response)\n\n return response.text\n except requests.exceptions.RequestException as e:\n # A serious problem happened, like an SSLError or InvalidURL\n return \"Error: {}\".format(e)\n\n\nfor n in range(0, int(math.ceil(nblocks / 100.0))):\n for block in requests.get('%s/blocks/seq/%d/%d' % (node, last - nblocks + n * 100, min(last, last - nblocks + (n + 1) * 100 - 1))).json():\n generators.append(\n (str(block['generator']), float(block['fee']) / 100000000))\n\nfor generator in set([x[0] for x in generators]):\n fees = sum(g[1] for g in filter(lambda x: x[0] == generator, generators))\n count = sum(1 for g in filter(lambda x: x[0] == generator, generators))\n total_fees += fees\n generator_balance = float(requests.get(\n node + '/consensus/generatingbalance/' + generator).json()['balance']) / 100000000\n unique_generators.append((generator, generator_balance, count, fees))\n total_balance += generator_balance\n\nfor i, generator in enumerate(sorted(unique_generators, key=lambda x: -x[1])):\n aliases = requests.get(\n node + '/addresses/alias/by-address/' + generator[0]).json()\n for alias in aliases:\n domain = alias.split(':')[2]\n if domain not in blacklist:\n address_txt_url = 'https://' + domain + '/address.txt'\n node_address = get_address(address_txt_url)\n dns_address = get_txt(domain)\n if 
str(generator[0]) in (node_address, dns_address):\n data['node'].append({'domain': domain, 'address': str(generator[0]), 'balance': str(generator[1]), 'share': str(\n generator[1] / total_balance * 100), 'blocks': str(generator[2]), 'fees': str(generator[3])})\n\n\nwith open(trusted_json_path, 'w') as outfile:\n json.dump(data, outfile)\n","sub_path":"trusted_nodes.py","file_name":"trusted_nodes.py","file_ext":"py","file_size_in_byte":2737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"85866646","text":"#----------------------------------------------------------------------\n# appentrypage.py\n#\n# (C)2005 Kim, Hyoun Woo\n#----------------------------------------------------------------------\nimport wx\nimport wx.wizard as wiz\nimport glob, os, time\nfrom common import *\n\n#----------------------------------------------------------------------\n# App Main Entry page\n#----------------------------------------------------------------------\nclass AppEntryPage(wiz.WizardPageSimple):\n def __init__(self, parent, title):\n wiz.WizardPageSimple.__init__(self, parent)\n\n # create a sizer of this page.\n self.sizerA = makePageTitle(self, title)\n\n self.appNameLabel = wx.StaticText(self, -1, 'App Name')\n self.appNameTextBox = wx.TextCtrl(self, -1, '', (0, 0), (250, 21))\n\n self.appObjNameLabel = wx.StaticText(self, -1, 'App Object Name')\n self.appObjNameTextBox = wx.TextCtrl(self, -1, '', (0, 0), (250, 21)) \n\n # script server\n scriptServerList = ['ntclserver', 'npythonserver', 'nluaserver', 'nrubyserver']\n\n self.scriptLabel = wx.StaticText(self, -1, 'Script Server')\n self.scriptComboBox = wx.ComboBox(self, 500, 'ntclserver', (0, 0),\n (200, 20), scriptServerList, wx.CB_DROPDOWN #|wxTE_PROCESS_ENTER\n )\n\n # startup script\n '''\n self.startupScriptLabel = wx.StaticText(self, -1, 'Startup Script')\n self.startupScriptTextBox = wx.TextCtrl(self, -1, '', (0,0), (250, 21))\n\n self.startupScriptDirBtn = wx.Button(self, -1, 'Browse...')\n self.Bind(wx.EVT_BUTTON, self.OnStartupScriptBtn, self.startupScriptDirBtn)\n '''\n\n # Layout\n sizerB = wx.GridBagSizer(4, 5)\n\n sizerB.Add(self.appNameLabel, (0, 0), \n flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n sizerB.Add(self.appNameTextBox, (0, 1), flag = wx.EXPAND)\n\n sizerB.Add(self.appObjNameLabel, (1, 0), \n flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n sizerB.Add(self.appObjNameTextBox, (1, 1), flag = wx.EXPAND)\n\n sizerB.Add(self.scriptLabel, (2, 0), \n flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n sizerB.Add(self.scriptComboBox, (2, 1),\n flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n\n '''\n sizerB.Add(self.startupScriptLabel, (3, 0), \n flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n sizerB.Add(self.startupScriptTextBox, (3, 1),\n flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n sizerB.Add(self.startupScriptDirBtn, (4, 1),\n flag = wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL)\n '''\n\n self.sizerA.Add(sizerB, 0, wx.ALL, 5)\n self.sizerA.Fit(self)\n self.SetSizer(self.sizerA)\n\n #----------------------------------------------------------------------\n #\n #----------------------------------------------------------------------\n '''\n def OnStartupScriptBtn(self, evt):\n wildcard = \"Nebula2 files (*.n2)|*.n2|\" \\\n \"Tcl files (*.tcl)|*.tcl|\" \\\n \"Python files (*.py)|*.py|\" \\\n \"Lua files (*.lua)|*.lua|\" \\\n \"Ruby file (*.rb)|*.rb|\" \\\n \"All files (*.*)|*.*\"\n\n dlg = wx.FileDialog(self, \"Choose startup script file:\", \n os.getcwd(), 
defaultFile='', wildcard=wildcard,\n            style=wx.OPEN | wx.CHANGE_DIR)\n        if dlg.ShowModal() == wx.ID_OK:\n            self.startupScriptTextBox.SetValue(dlg.GetPath())\n        dlg.Destroy()\n    '''\n\n    #----------------------------------------------------------------------\n    #\n    #----------------------------------------------------------------------\n    def validate(self):\n        valid = True\n        return valid\n\n#----------------------------------------------------------------------\n# EOF\n#----------------------------------------------------------------------\n","sub_path":"nebula2/appwiz/appentrypage.py","file_name":"appentrypage.py","file_ext":"py","file_size_in_byte":4015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"40812259","text":"# -*- coding:utf-8 -*-\n#@author:Fillico\n#@date:2019/3/13 13:32\n\n#------- Task:\n# 1: Assignment:\n# Write a class with a method http_request that can perform a GET or POST request; it must return a value\n# Every request must take request parameters\n# Login request URL: http://47.107.168.87:8080/futureloan/mvc/api/member/login\n# Request parameters: mobilephone:18688773467 pwd:123456 (logging in requires the phone number and the password)\n\nimport requests\nclass HttpRequest: # a class for HTTP requests: returns the response data for both GET and POST requests\n    def http_request(self,url,method=\"post\",**kwargs):\n        if method.upper()=='GET':\n            resp = requests.get(url, **kwargs)  # request parameters are passed through via kwargs\n        elif method.upper()=='POST':\n            resp = requests.post(url, **kwargs)\n        return resp # return the response data\nif __name__=='__main__':\n    url='http://test.lemonban.com//futureloan/mvc/api/member/register'\n    params={'mobilephone':'18688773467','pwd':'123456'}\n    login=HttpRequest() # create an instance\n    resu=login.http_request(url,params=params).json() # call the class's http_request method on the instance\n    # print(\"Status code: {}\\nResponse headers: {}\\nResponse body: {}\".format(resu.status_code,resu.headers,resu.text))\n    # print(type(resu))\n    print(resu)\n    print(resu['msg'])\n\n\n\n","sub_path":"api_1/http_request_module.py","file_name":"http_request_module.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"417814293","text":"import requests\nfrom pprint import pprint\nfrom urllib.parse import urljoin\n\n\nAPP_ID = '4897d036e1fa4029b38618ceee194ba7'\nAUTH_URL = 'https://oauth.yandex.ru/authorize'\n\nauth_data = {\n    'response_type': 'token',\n    'client_id': APP_ID\n}\n\n# print('?'.join((AUTH_URL, urlencode(auth_data))))\n\n_TOKEN = 'AQAAAAAKpzb1AAQxfv_b08ESXEFmglNdM-6MCVI'  # the token obtained via OAuth\n\nclass YandexBase():\n\n    def __init__(self, token=_TOKEN):\n        self.token = token\n\n    def get_header(self):\n        return {\n            'Content-Type': 'application/json',\n            'Authorization': 'OAuth {}'.format(self.token),\n            'User-Agent': 'asdasdasd'\n        }\n\n\nclass YandexManagement(YandexBase):\n    _MANAGEMENT_URL = 'https://api-metrika.yandex.ru/management/v1/'\n\n    def counter_list(self):\n        url = urljoin(self._MANAGEMENT_URL, 'counters')\n        headers = self.get_header()\n        response = requests.get(url, headers=headers)\n        counters_list = response.json()['counters']\n        return counters_list\n\n\nclass YandexCounters(YandexBase):\n    _STAT_URL = 'https://api-metrika.yandex.ru/stat/v1/'\n\n    def get_count_visits(self, counter_id):\n        url = urljoin(self._STAT_URL, 'data')\n        headers = self.get_header()\n        params = {\n            'id': counter_id,\n            'metrics': 'ym:s:visits'\n        }\n        response = requests.get(url, params, headers=headers)\n        visits_count = response.json()['data'][0]['metrics'][0]\n        return visits_count\n\n    def get_count_pageviews(self, counter_id):\n        url = urljoin(self._STAT_URL, 'data')\n        headers = self.get_header()\n        params = {\n            'id': counter_id,\n            'metrics': 
'ym:s:pageviews'\n }\n response = requests.get(url, params, headers=headers)\n visits_count = response.json()['data'][0]['metrics'][0]\n return visits_count\n\n def get_count_users(self, counter_id):\n url = urljoin(self._STAT_URL, 'data')\n headers = self.get_header()\n params = {\n 'id': counter_id,\n 'metrics': 'ym:s:users'\n }\n response = requests.get(url, params, headers=headers)\n visits_count = response.json()['data'][0]['metrics'][0]\n return visits_count\n\n\nmanagement = YandexManagement()\ncounters = YandexCounters()\npprint(management.counter_list())\nprint('-----------------------------------')\nprint('Количество визитов: ', counters.get_count_visits(44132474))\nprint('-----------------------------------')\nprint('Количество просмотров: ', counters.get_count_pageviews(44132474))\nprint('-----------------------------------')\nprint('Количество пользователей: ', counters.get_count_users(44132474))","sub_path":"PY-3.5/yandex_metrik_classes.py","file_name":"yandex_metrik_classes.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"369237010","text":"# make fold\nimport PySimpleGUI as sg\nfrom re import findall\n\n##chdir('')\n\nplaying = 0\nhighlight_label=None\n\nwindow_size=(1025,675)\n\ngray = '#F0F0F0'\nblack_gray='#e8e9ec'\nbackground = '#ffffff'\nwhite = '#ffffff'\nbackground_toolbar = '#b73226'\n\nsg.SetOptions(background_color=white,\n font=(\"Helvetica\", 9),\n margins=(0,0),\n element_background_color=white,\n element_padding=((0,0),(0,0))\n )\n\n# Images are located in a subfolder in the Demo Media Player.py folder\nimage_pause = './icons/stop.png'\nimage_start = './icons/start.png'\nimage_next = './icons/next.png'\nimage_forward = './icons/forward.png'\nimage_undo = './icons/undo.png'\nimage_redo = './icons/redo.png'\nimage_change_theme = './icons/change_theme.png'\nimage_setting = './icons/setting.png'\nimage_min = './icons/min.png'\nimage_max = './icons/max.png'\nimage_close = './icons/close.png'\nimage_search = './icons/afd.png' #'./icons/s.png'\nimage_find = './icons/find.png'\n\nUSE_MARK=1\nUSE_SPETAER=1\n\nadd_mian_panel=0 # only for test\n\npad_palybar_left_toolbar=130\npad_logo_undo=18\npad_redo_search=2\npad_search_theme=120\npad_left_toolbar_text=7\n\nsize_left_toolbar_text=(20,2)\n\n\ndict_recommend = {\n 'find': ('./icons/find.png', '发现音乐'),\n 'fm': ('./icons/fm.png', '私人FM'),\n 'mv': ('./icons/mv.png', 'MV'),\n 'friend': ('./icons/friend.png', '朋友'),\n}\ndict_my_music = {\n 'local': ('./icons/local_music.png', '本地音乐'),\n 'download': ('./icons/download.png', '下载管理'),\n 'cloud': ('./icons/cloud.png', '我的音乐云盘'),\n 'singer': ('./icons/singer.png', '我的歌手')\n}\ndict_maked_musiclist = {\n 'favorite': ('./icons/favorite.png', '我喜欢的音乐'),\n 'musiclist': ('./icons/music_list.png', '自定义歌单')\n}\n\ndef get_playbar_location(button_h=50,pad=10):\n c_l=window.current_location()\n return (0, c_l[1]-button_h)\n# define the toolbar\ncolumn_toolbar = [\n [sg.Button('', image_filename='./icons/toolbar_logo.png',\n button_color=(background_toolbar, background_toolbar),key='logo',\n border_width=0,)] +[sg.Text(' ' * pad_logo_undo, background_color=background_toolbar)]+ [\n sg.Button('',\n button_color=(background_toolbar, background_toolbar),\n image_filename=x,\n image_size=(20, 40),\n image_subsample=2,\n border_width=0,\n pad=((0, 0), (0, 0)),\n key=y)\n for x, y in zip([image_undo, image_redo], ['undo', 'redo'])\n ] +[sg.Text(' ' * pad_redo_search, 
background_color=background_toolbar)]+ [\n sg.Input('search music here',\n text_color='white',\n size=(30, 1),\n key='search_text',\n background_color=background_toolbar)\n ] + [\n sg.Button('',\n button_color=(background_toolbar, background_toolbar),\n image_filename=image_search,\n image_size=(20, 20),\n image_subsample=3,\n border_width=0,\n pad=((0, 0), (0, 0)),\n key='search',\n bind_return_key=True)\n ] + [sg.Text(' ' * pad_search_theme, background_color=background_toolbar)] + [\n sg.Button('',\n button_color=(background_toolbar, background_toolbar),\n image_filename=x,\n image_size=(30, 30),\n image_subsample=1,\n border_width=0,\n pad=((0, 0), (0, 0)),\n key=y) for x, y in zip([\n image_change_theme, image_setting, image_min, image_max,\n image_close\n ], ['change_theme', 'setting', 'min', 'max', 'close'])\n ]\n]\n\n# define the left toolbar\ncolumn_left_toolbar = [[\n sg.Text('推荐', background_color=white, pad=((0,0),(1,1)),key='recommand')\n]] + [[\n sg.Column([[\n sg.Button('',\n button_color=(white, white),\n image_filename=x,\n image_size=(30, 30),\n image_subsample=1,\n border_width=0,\n pad=((12, 0), 1),\n key=y),\n sg.Text(z, enable_events=True,size=size_left_toolbar_text,\n key=y + '_text',pad=((0,0),(pad_left_toolbar_text,0)))\n ]],\n key=y + '_column',\n pad=((0, 0), (0, 0)),\n size=(200, 34)\n )\n] for y, (x, z) in dict_recommend.items()] + [[\n sg.Text('我的音乐', background_color=white, pad=((0,0),(1,1)), key='mymusic')\n]] + [[\n sg.Column([[\n sg.Button('',\n button_color=(white, white),\n image_filename=x,\n \n image_size=(30, 30),\n image_subsample=1,\n border_width=0,\n pad=((0, 0), 1),\n key=y),\n sg.Text(z, enable_events=True,size=size_left_toolbar_text,\n key=y + '_text',pad=((0,0),(pad_left_toolbar_text,0)))\n ]],\n key=y + '_column',\n pad=(12, 0),\n size=(200, 34)\n )\n] for y, (x, z) in dict_my_music.items()] + [[\n sg.Text('创建的歌单', background_color=white, pad=((0,0),(1,1)), key='createdmusiclist')\n]] + [[\n sg.Column([[\n sg.Button('',\n button_color=(white, white),\n image_filename=x,\n image_size=(30, 30),\n image_subsample=1,\n border_width=0,\n pad=((0, 0), 1),\n key=y),\n sg.Text(z, enable_events=True,size=size_left_toolbar_text,\n key=y + '_text',pad=((0,0),(pad_left_toolbar_text,0)))\n ]],\n key=y + '_column',\n pad=(12, 0),\n size=(200, 34)\n )\n] for y, (x, z) in dict_maked_musiclist.items()]\n\n\n# define the main panel\n\npad_main_panel_board_text=30\npad_main_panel_middle_text=5\n\ndict_m_p_text={'personal_rcmd':'个性推荐','musiclist_m_p':'歌单',\n 'anchor_radio':'主播电台','leader_board':'排行榜','lastest_music':'最新音乐'}\ncolumn_main_panel=[\n [sg.Text(v,key=k) for k,v in dict_m_p_text.items()],\n [sg.Image('./icons/avatar.png',size=(300,50))],\n [sg.Text('推荐歌单',key='rcmd_music_list')],\n [sg.Image('./icons/avatar.png',size=(300,50))],\n]\n\n# define the playbar\ncolumn_playbar = [\n [sg.Button('move_l_t_down',visible=False),sg.Button('move_l_t_up',visible=False),]+\n [sg.Image('./icons/avatar.png',size=(50,50),pad=((0,0),(0,0))),\n sg.Column([[sg.Text('琵琶行',key='music_name',),sg.Image('./icons/favorite.png',key='love_or_no')],\n [sg.Text('奇然/沈谧仁',key='author',),sg.Image('./icons/share.png',key='share')]])],\n [\n sg.Button('',\n button_color=(background, background),\n image_filename=image_forward,\n image_size=(50, 50),\n image_subsample=1,\n border_width=0,\n key='forward'),\n \n sg.Button('',\n button_color=(background, background),\n image_filename=image_start,\n image_size=(30, 30),\n image_subsample=1,\n border_width=0,\n size=((50,50)),\n 
key='start_or_stop'),\n \n sg.Button('',\n button_color=(background, background),\n image_filename=image_next,\n image_size=(50, 50),\n image_subsample=1,\n border_width=0,\n key='next'),\n ]]\n\n\nlayout = [\n [\n sg.Column(\n column_toolbar,\n background_color=background_toolbar,\n size=(1100, 50),\n pad=((0,0),(0,0)),\n key='toolbar'\n )\n ],\n [sg.Column(column_left_toolbar, background_color=white,\n key='left_toolbar',)]+\n ([sg.Text(' ',key='sperater',background_color=gray,font=(\"Helvetica\", 1))] if USE_SPETAER else [])+\n ([sg.Text(' ',background_color=background_toolbar,\n key='mark_text',font=(\"Helvetica\", 1))] if USE_MARK else []) \n +([sg.Column(column_main_panel)] if add_mian_panel else []) ,\n \n [sg.Column(\n column_playbar,\n background_color=white,\n key='playbar',\n pad=((0,0),(pad_palybar_left_toolbar,0))\n )],\n]\n\n# generate the window\n\nwindow = sg.Window(\n 'Media File Player',\n layout,\n auto_size_text=True,\n# font=(\"Helvetica\", 25),\n size=window_size,\n no_titlebar=False,\n margins=(0,0),\n resizable=True,\n finalize=True\n) #default_element_size=(20, 1),\n\ndef get_elem_size_and_posi(e,refresh=0,window=window):\n if refresh: window.Finalize()\n return [int (x) for x in findall('\\d+',window.Elem(e).Widget.winfo_geometry())]\n\ng=get_elem_size_and_posi\n\ndef place(e,x,y,refresh=0):\n if refresh: window.Finalize()\n window.Elem(e).Widget.place(x=x,y=y)\n\n\ndef ready_to_get_all_thing(Column,all_thing,pattern=['Text','Button'],):\n if type(Column)==list:\n for x in Column:\n ready_to_get_all_thing(x,all_thing,pattern)\n elif 'Column' in str(Column) :\n if 'Column' in pattern:all_thing.append(Column)\n for x in Column.Rows:\n ready_to_get_all_thing(x,all_thing,pattern)\n else:\n for x in pattern:\n if x in str(Column):\n# print(Column)\n all_thing.append(Column)\n break\n# print(Column)\n \ndef get_all_elem(Column,pattern=['Text','Button'],total=0):\n all_thing=[]\n ready_to_get_all_thing(Column,all_thing,pattern)\n return [x for x in all_thing if x.Key not in ['recommand','mymusic','createdmusiclist']] if total==0 else [x for x in all_thing if x.Key]\n\ndef get_correct_x(event):\n index=all_short_key_in_left_toolbar.index(event)\n return 23*(1+index//4) + 33*index\n\ndef move_left_toolbar(direction='down',step=10,):\n place('left_toolbar',0, -g('left_toolbar')[3] + step*(1 if direction=='down' else -1))\n \n# update size and position\n# window.Elem('123').Widget.place(x=100,y=20)\n# input('Continue...')\n# window.Elem('toolbar').Update(visible=False)\n# window.Finalize()\n# window.Elem('toolbar').Update(visible=True)\n\n# input('Continue...')\n\nsize_of_left_toolbar=get_elem_size_and_posi('left_toolbar')\n\n# mark_text=\nall_key_in_left_toolbar=[ x.Key for x in get_all_elem(window.Elem('left_toolbar'))]\nall_short_key_in_left_toolbar=[x for x in all_key_in_left_toolbar if '_' not in x]\n\ndef init():\n if USE_SPETAER: \n window.Elem('sperater').set_size((5,size_of_left_toolbar[1]))\n place('sperater',g('sperater')[2]-40,0)\n \n if USE_MARK: \n window.Elem('mark_text').set_size((1,10))\n place('mark_text',-3,get_correct_x('find'))\n window.Elem('mark_text').set_size((1,10))\n \n window.Elem('start_or_stop').Update(image_filename=image_start,)\n window['start_or_stop'].set_size((50,50))\n \n window['search_text'].Widget.config(insertbackground=white)\n \n# window['find_column'].Widget.bind(\"\", window['search'].ButtonReboundCallback)\n# window['left_toolbar'].Widget.bind(\"\", window['search'].ButtonReboundCallback)\n\n for x in 
get_all_elem(window['left_toolbar'],pattern=['Text', 'Button', 'Column'],total=1):\n        x.Widget.bind(\"\", window['move_l_t_down'].ButtonReboundCallback)\n        x.Widget.bind(\"\", window['move_l_t_up'].ButtonReboundCallback)\n        \n#        x.expand((True,True),)\n#        if 'Text' in str(x):\n#            place(x.Key,g(x.Key)[2],0)\n    \n    \n    \n    \ninit()\n\n# print(all_short_key_in_left_toolbar)\n# assert 0\n# Our event loop\n\nwhile (True):\n    event, values = window.Read()  # block until the next window event\n    if event in ('close', 'Exit', None):\n#        print(window.size)\n        break\n    elif event == 'start_or_stop':\n        if playing:\n            window.Elem('start_or_stop').Update(\n                image_filename=image_start,\n            )\n        else:\n            window.Elem('start_or_stop').Update(\n                image_filename=image_pause,\n            )\n        playing = not playing\n        window['start_or_stop'].set_size((50,50))\n        \n    elif event == 'logo':\n#        e = window.Elem('cloud_column')\n#        print(e.get_size())\n        while 1:\n            try:\n                input_ = input('>>> ')\n                if input_ == 'b': break\n                else: \n                    exec(input_)\n                    window.Finalize()\n                    window.refresh()\n            except Exception as e:\n                print(e)\n        \n    elif event in all_key_in_left_toolbar:\n        if highlight_label:\n            window.Elem(highlight_label).Update(button_color=(white,white))\n            window.Elem(highlight_label+'_text').Update(background_color=white)\n        short_key=event.split('_')[0]\n        window.Elem(short_key).Update(button_color=(black_gray,black_gray))\n        window.Elem(short_key+'_text').Update(background_color=black_gray)\n        \n        window.Elem(short_key+'_column').Widget.configure(background=background_toolbar)\n        if USE_MARK: place('mark_text',-3,get_correct_x(short_key))\n        highlight_label=short_key\n    elif event == 'min':\n        window.minimize()\n        \n    elif event in ['move_l_t_down', 'move_l_t_up']:\n        move_left_toolbar(event.split('_')[-1], )\n        \n    if event != sg.TIMEOUT_KEY:\n        print(event)\n        window.Element('recommand').Update(event)\n        \nwindow.Close()\n\n''' \nprint(window.Elem('123').Widget.winfo_geometry())  # absolute position\nprint(window.current_location)\n\n'''\n","sub_path":"d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":13766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"70667718","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport xgboost as xg\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\n# Create the training and test sets (X and y are assumed to be preloaded)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)\n\n# Instantiate the XGBRegressor: xg_reg\nxg_reg = xg.XGBRegressor(objective='reg:squarederror', n_estimators=10, seed=123)\n\n# Fit the regressor to the training set\nxg_reg.fit(X_train, y_train)\n\n# Predict the labels of the test set: preds\npreds = xg_reg.predict(X_test)\n\n# Compute the rmse: rmse\nrmse = np.sqrt(mean_squared_error(y_test, preds))\nprint(\"RMSE: %f\" % (rmse))\n","sub_path":"XGBoost_python/practice1.py","file_name":"practice1.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"446215558","text":"from tabulate import tabulate\nimport numpy as np\nimport math\n\nfacts = {}  # cache for factorials\n\n\n# calculating u mentioned in the formula\ndef u_cal(u_loc, num):\n    temp = 1\n    for i in range(num):\n        temp = temp * (u_loc - i)\n    return temp\n\n\n# calculating factorial of given number n\ndef fact(val):\n    if val <= 1:\n        return 1\n    if val in facts:\n        return facts[val]\n    facts[val] = val * fact(val - 1)\n    return facts[val]\n\n\n# Displaying the forward difference table\ndef print_table(interval_x, output_y):\n    table = np.zeros((n, n + 1))\n    table[:, 0] = interval_x\n    table[:, 1:] = output_y\n    headers = ['X', 'Y']\n    indentation = ' ' + '\\t' * (n + 1)\n    print(f\"\\n {indentation}Difference Table\".upper())\n    for i in 
range(1, n):\n headers.append(f\"del_{i}_y\")\n print(tabulate(table, headers=headers, tablefmt='psql'))\n\n\nif __name__ == '__main__':\n\n n = 5\n x = [45, 50, 55, 60, 65]\n\n # difference table\n y = np.zeros((n, n))\n y[:, 0] = [0.7071, 0.7660, 0.8192, 0.8660, np.sin(math.radians(x[-1]))]\n\n # Calculating the forward difference\n # table\n for i in range(1, n):\n for j in range(n - i):\n y[j][i] = y[j + 1][i - 1] - y[j][i - 1]\n\n print_table(x, y)\n\n # Value to interpolate at\n value = 52\n\n # initializing u and sum\n sum = y[0][0]\n u = (value - x[0]) / (x[1] - x[0])\n for i in range(1, n):\n sum += (u_cal(u, i) * y[0][i]) / fact(i)\n\n print(f\"\\nValue at {value} is: {round(sum, 6)}\")\n","sub_path":"Numerical Methods/newton_forward.py","file_name":"newton_forward.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163062215","text":"\"\"\"\nAnalysis of behavioural data\n\nAnalysis of the behavioural performance of the dummy fixation task during scanning. The script \nessentially performs the same calculation as directly done automatically after single functional \nruns.\n\ncreated by Daniel Haenelt\nDate created: 01-10-2019 \nLast modified: 01-10-2019 \n\"\"\"\nimport os\nimport numpy as np\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\ninput = [\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_1/logfiles/p1_GE_EPI4_Run1_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_2/logfiles/p1_GE_EPI4_Run2_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_3/logfiles/p1_GE_EPI4_Run3_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_4/logfiles/p1_GE_EPI4_Run4_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_5/logfiles/p1_GE_EPI4_Run5_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_6/logfiles/p1_GE_EPI4_Run6_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_7/logfiles/p1_GE_EPI4_Run7_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_8/logfiles/p1_GE_EPI4_Run8_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_9/logfiles/p1_GE_EPI4_Run9_colour.mat\",\n \"/data/pt_01880/Experiment3_Stripes/p1/colour/GE_EPI4/Run_10/logfiles/p1_GE_EPI4_Run10_colour.mat\",\n ]\n\n\"\"\" do not edit below \"\"\"\n\n# font parameters for plots\nrc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n\nfor i in range(len(input)):\n \n # get output path and filename\n path_output = os.path.dirname(input[i])\n \n # get mat-file\n FixationData = loadmat(input[i])[\"FixationData\"]\n\n # number of responses\n response = 0\n change = 0\n for j in range(len(FixationData)):\n if FixationData[j,0] == 3:\n response += 1\n else:\n change += 1\n\n # number of hits, misses and rt\n change_miss = 0\n change_hit = 0\n rt = []\n for j in range(len(FixationData)-1):\n if FixationData[j,0] != 3 and FixationData[j+1,0] != 3:\n change_miss += 1\n elif FixationData[j,0] != 3 and FixationData[j+1,0] == 3:\n change_hit += 1\n rt.append((FixationData[j+1,1]- FixationData[j,1])*1000)\n\n # add a miss if the run does not end with a response\n if FixationData[-1,0] != 3:\n change_miss += 1\n\n # compute error rate\n error_rate = change_miss / change * 100\n\n # reaction time mean and std\n mean_rt = np.mean(rt)\n std_rt = np.std(rt)\n\n # output\n fileID = 
open(os.path.join(path_output,\"fixation_task_summary.txt\"),\"w\")\n fileID.write(\"Number of changes: %i\\n\" % change)\n fileID.write(\"Number of responses: %i\\n\" % response)\n fileID.write(\"Number of hits: %i\\n\" % change_hit)\n fileID.write(\"Number of misses: %i\\n\" % change_miss)\n fileID.write(\"Error rate: %.2f %%\\n\" % error_rate)\n fileID.write(\"Mean RT: %.2f ms\\n\" % mean_rt)\n fileID.write(\"Corresponding SD: %.2f ms\" % std_rt)\n fileID.close()\n\n # hist plot\n fig, ax = plt.subplots()\n ax.hist(rt)\n ax.set_xlabel(\"RT in ms\")\n ax.set_ylabel(\"Number of responses\")\n ax.set_title(\"Dummy fixation task reaction times\")\n fig.savefig(os.path.join(path_output,\"fixation_task_hist.png\"), format='png', bbox_inches='tight')\n #plt.show()","sub_path":"behavior/analysis_fixation.py","file_name":"analysis_fixation.py","file_ext":"py","file_size_in_byte":3501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"379575586","text":"import numpy as np\nimport pandas as pd\nfrom nltools.stats import one_sample_permutation,two_sample_permutation,correlation_permutation\n\ndef test_permutation():\n\tdat = np.random.multivariate_normal([2,6],[[.5,2],[.5,3]],100)\n\tx = dat[:,0]\n\ty = dat[:,1]\n\tstats = two_sample_permutation(x,y)\n\tassert (stats['mean'] < -2) & (stats['mean'] > -6)\n\tassert stats['p']< .001\n\tprint(stats)\n\tstats = one_sample_permutation(x-y)\n\tassert (stats['mean'] < -2) & (stats['mean'] > -6)\n\tassert stats['p']< .001\n\tprint(stats)\n\tstats = correlation_permutation(x,y)\n\tassert (stats['correlation']>.4) & (stats['correlation']<.85)\n\tassert stats['p']< .001\n","sub_path":"nltools/tests/test_stats.py","file_name":"test_stats.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"581812334","text":"#!/usr/bin/env python\n\nfrom html.parser import HTMLParser\nfrom urllib.parse import urljoin\n\nclass LinkParser(HTMLParser):\n def __init__(self):\n self.anchorlist = []\n super().__init__()\n \n def handle_starttag(self, tag, attrs):\n if (tag != 'a'): return\n for a,v in attrs:\n if a == 'href': self.anchorlist.append(v)\n\ndef links(file):\n try:\n p = LinkParser(); p.feed(open(file).read()); p.close()\n return filter(lambda x: ':' not in x, p.anchorlist)\n except:\n return []\n\ndef crawl(file, visited):\n if file.endswith('/') or file.endswith('.'):\n file = urljoin(file, 'index.html')\n \n if file and file not in visited:\n visited.add(file)\n base = file[0:file.rfind('/')+1]\n \n for f in links(file):\n visited = crawl(urljoin(base, f), visited)\n \n return visited\n\nsite = list(crawl('html/', set())); site.sort()\n\nfor i in site: print(i)","sub_path":"spyder.py","file_name":"spyder.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"543932318","text":"from list_node import ListNode\nfrom util.regex import replace_specific_word\n\nclass LinkedList():\n \"\"\"A self-ordering linked list based on node values\"\"\"\n\n def __init__(self, max_length, head = None):\n self.head = head\n self.tail = None\n self.max_length = max_length\n self.length = 1 if self.head else 0\n\n def add(self, node):\n \"\"\"Add a new node to the list, reordering if necessary\"\"\"\n\n if self.length == 0:\n self.head = node\n elif self.length == 1:\n node.set_parent(self.head)\n self.head.set_child(node)\n elif self.length < 
self.max_length:\n            node.set_parent(self.tail)\n            self.tail.set_child(node)\n        elif node.get_value() > self.tail.get_value():\n            node.set_parent(self.tail.parent)\n            self.tail.parent.set_child(node)\n            self.tail.set_parent(None)\n            self.tail.set_child(None)\n            self.tail.data['is_in_list'] = False\n        else:\n            node.data['is_in_list'] = False\n            node.set_parent(None)\n            node.set_child(None)\n            return\n\n        self.tail = node\n        node.data['is_in_list'] = True\n        self.reorder(node)\n        if self.length < self.max_length: self.length += 1\n\n    def reorder(self, node):\n        \"\"\"Continuously swap node with parent if node value > parent value\"\"\"\n\n        parent = node.parent\n        while parent and node.get_value() > parent.get_value():\n            new_parent = parent.parent\n            if new_parent:\n                new_parent.set_child(node)\n            else:\n                self.head = node\n\n            child = node.child\n            if child:\n                child.set_parent(parent)\n            else:\n                self.tail = parent\n\n            parent.set_parent(node)\n            parent.set_child(child)\n\n            node.set_child(parent)\n            node.set_parent(new_parent)\n\n            parent = node.parent\n\n    def print_results(self):\n        \"\"\"Print full, formatted search results to terminal\"\"\"\n\n        current_node = self.head\n        while current_node:\n            data = current_node.data\n            word = data['word']\n            print('WORD: ' + word)\n            print('DOCS: ' + str(sorted(data['docs'])))\n            print('SENTENCES:')\n            for sentence in data['sentences']:\n                red_word = '\\033[91m' + word + '\\033[0m'\n                print(replace_specific_word(word, red_word, sentence))\n\n            current_node = current_node.child\n\n    def nodes_forward(self, data_keys=[]):\n        \"\"\"\n        Testing method: Return a string of the nodes from head-to-tail\n\n        `data_keys`: keys pertaining to values in the node's data that you wish\n        to be printed along with its `value` (by default)\n        \"\"\"\n\n        forward_nodes_str = ''\n\n        forward_node = self.head\n        while forward_node:\n            for key in data_keys:\n                forward_nodes_str += str(forward_node.data[key]) + ', '\n            forward_nodes_str += str(forward_node.data['value'])\n            forward_node = forward_node.child\n            forward_nodes_str += ' -> ' if forward_node else ''\n\n        return forward_nodes_str\n\n    def nodes_backward(self, data_keys=[]):\n        \"\"\"\n        Testing method: Return a string of the nodes from tail-to-head\n\n        `data_keys`: keys pertaining to values in the node's data that you wish\n        to be printed along with its `value` (by default)\n        \"\"\"\n\n        backward_nodes_str = ''\n\n        backward_node = self.tail\n        while backward_node:\n            for key in data_keys:\n                backward_nodes_str += str(backward_node.data[key]) + ', '\n            backward_nodes_str += str(backward_node.data['value'])\n            backward_node = backward_node.parent\n            backward_nodes_str += ' -> ' if backward_node else ''\n\n        return backward_nodes_str\n","sub_path":"lib/linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"122093691","text":"# import the module\nimport pygame\n# initialise pygame\npygame.init()\n# create the game window\n# set_mode([window width, window height]) -- both are in pixels\nwindow = pygame.display.set_mode([512, 768])\n# load a local image\nico_img = pygame.image.load(\"res/app.ico\")\n\n# set the icon of the game window\npygame.display.set_icon(ico_img)\n\n# set the caption text of the game window\npygame.display.set_caption(\"飞机大战\")\n\n# load a local image\nbg_img = pygame.image.load(\"res/img_bg_level_1.jpg\")\n\n\n# load a local image\nplane_img = pygame.image.load(\"res/hero2.png\")\n# get the image's rect\nplane_img_rect = plane_img.get_rect()\n# <rect(x, y, w, h)>\n# x, y, w, h\n# access the rect's individual values -> index by subscript, e.g. plane_img_rect[2]\nprint(plane_img_rect)\n\nwhile True:\n    # draw the image onto the game window\n    window.blit(bg_img, (0, 0))\n    # draw the image onto the game window\n    
window.blit(plane_img, (plane_img_rect[0], plane_img_rect[1]))\n    # refresh the game window\n    pygame.display.update()\n\n    # modify the x and y coordinates\n    # move_ip(pixels to add or subtract each time, pixels to add or subtract each time)\n    plane_img_rect.move_ip(1, 1)","sub_path":"pythonstage1/飞机大战/06-图片矩形.py","file_name":"06-图片矩形.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"121057381","text":"# Create Trivia Episode\r\n# \r\n# Create a trivia game episode that tests a player's knowledge of\r\n# Python files and exceptions.\r\n# My Notes: While the challenge was to just write a text file\r\n# I thought it would be more fun to write a program to make the\r\n# trivia episodes\r\n\r\ndef intro():\r\n\tprint(\"\\t\\tTrivia Episode Creator\")\r\n\tprint(\"\\n\\tA program to create trivia episodes for trivia game\\n\")\r\n\t\r\ndef save(trivia):\r\n\tfilename = input(\"File name to save as: \")\r\n\tfilename += \".txt\"\r\n\t\r\n\tepisodefile = open(filename, \"w\")\r\n\tepisodefile.writelines(trivia)\r\n\tepisodefile.close()\r\n\t\r\n\tprint(filename, \"saved\")\r\n\t\r\ndef new_trivia():\r\n\ttitle = input(\"Title of new trivia: \")\r\n\ttitle += \"\\n\"\r\n\ttrivia = [title]\r\n\treturn trivia\r\n\t\r\ndef add_question(trivia):\r\n\ttopic = input(\"What is the topic?: \")\r\n\ttopic += \"\\n\"\r\n\tpoints = input(\"How many points is this question worth?: \")\r\n\tpoints += \"\\n\"\r\n\tquestion = input(\"Question (use / for new line): \")\r\n\tquestion += \"\\n\"\r\n\ta1 = input(\"Answer 1: \")\r\n\ta1 += \"\\n\"\r\n\ta2 = input(\"Answer 2: \")\r\n\ta2 += \"\\n\"\r\n\ta3 = input(\"Answer 3: \")\r\n\ta3 += \"\\n\"\r\n\ta4 = input(\"Answer 4: \")\r\n\ta4 += \"\\n\"\r\n\tcorrect = input(\"Which answer is correct(1,2,3 or 4)?: \")\r\n\tcorrect += \"\\n\"\r\n\treason = input(\"Explain answer(use / for new line): \")\r\n\treason += \"\\n\"\r\n\t\r\n\ttrivia.append(topic)\r\n\ttrivia.append(question)\r\n\ttrivia.append(a1)\r\n\ttrivia.append(a2)\r\n\ttrivia.append(a3)\r\n\ttrivia.append(a4)\r\n\ttrivia.append(correct)\r\n\ttrivia.append(reason)\r\n\ttrivia.append(points)\r\n\t\r\n\treturn trivia\r\n\t\r\ndef outro():\r\n\tprint(\"\\nThank you for using Trivia Episode Creator\")\r\n\tprint(\"Created by Dosk3n\\n\")\r\n\t\r\ndef main():\r\n\tintro()\r\n\ttrivia = new_trivia()\r\n\t\r\n\tnew_question = None\r\n\twhile new_question != \"n\":\r\n\t\ttrivia = add_question(trivia)\r\n\t\tnew_question = input(\"\\nWould you like to add another question?(Y/n): \")\r\n\t\r\n\tsave(trivia)\r\n\t\r\n\toutro()\r\n\t\r\nmain()\r\n\r\ninput(\"\\n\\nPress the enter key to exit\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"MyChallengeCode/ch7/create_trivia_episode.py","file_name":"create_trivia_episode.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"40812259","text":"import pytest\n\nfrom decimal import Decimal\n\nfrom bolt11.compat import shorten_amount, unshorten_amount\nfrom bolt11.utils import amount_to_btc, amount_to_sat, btc_to_amount, sat_to_amount\n\n\nclass TestAmounts:\n    @pytest.mark.parametrize(\n        \"btc, amount\",\n        [\n            (Decimal(10) / 10 ** 12, \"10p\"),\n            (Decimal(1000) / 10 ** 12, \"1n\"),\n            (Decimal(1200) / 10 ** 12, \"1200p\"),\n            (Decimal(123) / 10 ** 6, \"123u\"),\n            (Decimal(123) / 1000, \"123m\"),\n            (3, \"3\"),\n        ],\n    )\n    def test_amount_to_btc(self, btc, amount):\n        assert amount_to_btc(amount) == btc\n        assert btc_to_amount(btc) == amount\n        # compat test\n        
assert shorten_amount(btc) == amount\n        assert unshorten_amount(amount) == btc\n\n    @pytest.mark.parametrize(\n        \"sat, amount\",\n        [\n            (1, \"10n\"),\n            (10, \"100n\"),\n            (100, \"1u\"),\n            (100000, \"1m\"),\n            (100000000, \"1\"),\n            (123456789, \"1234567890n\"),\n            (123450000, \"1234500u\"),\n            (123400000, \"1234m\"),\n        ],\n    )\n    def test_amount_to_sat(self, sat, amount):\n        assert amount_to_sat(amount) == sat\n        assert sat_to_amount(sat) == amount\n\n    @pytest.mark.parametrize(\"sat\", [100.5, 27.03])\n    def test_sat_to_amount(self, sat):\n        with pytest.raises(ValueError):\n            sat_to_amount(sat)\n\n    @pytest.mark.parametrize(\"amount\", [\"123x\", \"1f0\"])\n    def test_invalid_amount(self, amount):\n        with pytest.raises(ValueError):\n            amount_to_btc(amount)\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"631258194","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport cv2\r\n# numpy does the array operations.\r\n# matplotlib is used for plotting.\r\n# os is used to iterate the directory and join paths.\r\n# if you do not have cv2, use \"pip install opencv-python\"\r\n# cv2 is used to do image operations.\r\n\r\n# Set the data directory\r\n#DATADIR =\"C:\\\\Work\\\\MicroService\\\\Tensorflow\\\\02_DeepLeaning_Tensoflow_Keara_sentdex_11\\\\DataSets\\\\PetImages\"\r\nDATADIR =\"C:/Work/MicroService/Tensorflow/02_DeepLeaning_Tensoflow_Keara_sentdex_11/DataSets/PetImages\"\r\nCATEGORIES = [\"Dog\", \"Cat\"]\r\nIMG_SIZE = 50\r\nfor category in CATEGORIES:\r\n    path = os.path.join (DATADIR, category) # path to cats or dogs\r\n    for img in os.listdir (path):\r\n        # If we want to read grayscale\r\n        imag_array = cv2.imread(os.path.join (path, img), cv2.IMREAD_GRAYSCALE)\r\n        # if we want to read RGB image.\r\n        #imag_array = cv2.imread(os.path.join (path, img))\r\n        new_array = cv2.resize (imag_array, (IMG_SIZE, IMG_SIZE))\r\n        #plt.imshow(imag_array, cmap=\"gray\")\r\n        plt.imshow (new_array, cmap=\"gray\")\r\n        plt.show()\r\n        break\r\n    break\r\nprint ('imag_array.shape: ', imag_array.shape)\r\nprint ('image_array: '); print (imag_array)","sub_path":"ch02_LoadDataset_src/ch02_LoadDataSet.py","file_name":"ch02_LoadDataSet.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"302432233","text":"import tensorflow as tf\nfrom tensorflow.contrib.training import HParams\n\ndef create_model(fingerprint_input, params, is_training):\n    # if is_training:\n    #     dropout_prob = tf.placeholder(tf.float32, name='dropout_prob')\n    # dropout_prob = tf.constant(0.5, dtype=tf.float32, name='dropout_prob')\n    input_frequency_size = params.dct_coefficient_count\n    input_time_size = params.spectrogram_length\n    fingerprint_4d = tf.reshape(fingerprint_input,\n                                [-1, input_time_size, input_frequency_size, 1])\n    first_filter_width = 8\n    first_filter_height = 20\n    first_filter_count = 64\n    first_weights = tf.get_variable('first_weights',\n        initializer=tf.truncated_normal(\n            [first_filter_height, first_filter_width, 1, first_filter_count],\n            stddev=0.01))\n    first_bias = tf.get_variable('first_bias', initializer=tf.zeros([first_filter_count]))\n    first_conv = tf.nn.conv2d(fingerprint_4d, first_weights, [1, 1, 1, 1],\n                              'SAME') + first_bias\n    first_relu = tf.nn.relu(first_conv)\n    if is_training:\n        first_dropout = tf.nn.dropout(first_relu, params.keep_prob)\n    else:\n        first_dropout = first_relu\n    
max_pool = tf.nn.max_pool(first_dropout, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME')\n second_filter_width = 4\n second_filter_height = 10\n second_filter_count = 64\n second_weights = tf.get_variable('second_weights',\n initializer=tf.truncated_normal(\n [\n second_filter_height, second_filter_width, first_filter_count,\n second_filter_count\n ],\n stddev=0.01))\n second_bias = tf.get_variable('second_bias', initializer=tf.zeros([second_filter_count]))\n second_conv = tf.nn.conv2d(max_pool, second_weights, [1, 1, 1, 1],\n 'SAME') + second_bias\n second_relu = tf.nn.relu(second_conv)\n if is_training:\n second_dropout = tf.nn.dropout(second_relu, params.keep_prob)\n else:\n second_dropout = second_relu\n second_conv_shape = second_dropout.get_shape()\n second_conv_output_width = second_conv_shape[2]\n second_conv_output_height = second_conv_shape[1]\n second_conv_element_count = int(\n second_conv_output_width * second_conv_output_height *\n second_filter_count)\n flattened_second_conv = tf.reshape(second_dropout,\n [-1, second_conv_element_count])\n label_count = params.label_count\n final_fc_weights = tf.get_variable('final_fc_weights',\n initializer=tf.truncated_normal(\n [second_conv_element_count, label_count], stddev=0.01))\n final_fc_bias = tf.get_variable('final_fc_bias', initializer=tf.zeros([label_count]))\n final_fc = tf.matmul(flattened_second_conv, final_fc_weights) + final_fc_bias\n return final_fc\n # if is_training:\n # return final_fc, dropout_prob\n # else:\n # return final_fc, 0.5 # fix this. remove the 0.5\n\ndef main():\n fingerprint = tf.zeros([2, 3920])\n model_settings={\n 'desired_samples': 16000, \n 'window_size_samples': 480, \n 'window_stride_samples': 160, \n 'spectrogram_length': 98, \n 'dct_coefficient_count': 40, \n 'fingerprint_size': 3920, \n 'label_count': 12, \n 'sample_rate': 16000\n }\n hparams=HParams(**model_settings)\n is_training = True\n model = create_model(fingerprint, hparams, is_training)\n print(model)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/fmfcc/model_def.py","file_name":"model_def.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"273093337","text":"# ../scripts/included/gg_deathmatch/modules/countdown.py\r\n\r\n'''\r\n$Rev$\r\n$LastChangedBy$\r\n$LastChangedDate$\r\n'''\r\n\r\n# =============================================================================\r\n# >> IMPORTS\r\n# =============================================================================\r\n# EventScripts Imports\r\n# ES\r\nfrom es import ServerVar\r\nfrom es import exists\r\n# Gamethread\r\nfrom gamethread import delayed\r\n# Playerlib\r\nfrom playerlib import getPlayer\r\n\r\n# GunGame Imports\r\n# Modules\r\nfrom gungame51.modules.active import ActiveInfo\r\n# Repeat\r\nfrom gungame51.core.repeat import Repeat\r\n\r\n\r\n# =============================================================================\r\n# >> GLOBAL VARIABLES\r\n# =============================================================================\r\ngg_dm_respawn_delay = ServerVar('gg_dm_respawn_delay')\r\n\r\n\r\n# =============================================================================\r\n# >> CLASSES\r\n# =============================================================================\r\nclass PlayerCountdown(object):\r\n '''Object used to house the countdown methods for BasePlayer objects'''\r\n\r\n def start_repeat(self):\r\n '''Starts the player's repeat'''\r\n\r\n # Is there a delay?\r\n if not 
int(gg_dm_respawn_delay):\r\n\r\n # If not, simply spawn the player\r\n delayed(0.1, self.check_respawn)\r\n\r\n # No need to go further\r\n return\r\n\r\n # Start the player's repeat\r\n self.repeat.start(1, int(gg_dm_respawn_delay))\r\n\r\n def count_down(self):\r\n '''Sends hudhint messages with remaining time and respawns the player\r\n '''\r\n\r\n # Is the player still on the server?\r\n if not exists('userid', self.userid):\r\n\r\n # If not, remove them from the players dictionary\r\n self.remove_player(self.userid)\r\n\r\n # No need to go further\r\n return\r\n\r\n # Is the player alive?\r\n if not getPlayer(self.userid).isdead:\r\n\r\n # Stop the repeat\r\n self.stop_repeat()\r\n\r\n # No need to continue the count-down\r\n return\r\n\r\n # Is the round inactive?\r\n if not ActiveInfo.round:\r\n\r\n # Send the player a hudhint that the round has ended\r\n self.send_hudhint('RespawnCountdown_RoundEnded')\r\n\r\n # Stop the repeat\r\n self.stop_repeat()\r\n\r\n # No need to continue\r\n return\r\n\r\n # Is there more than 1 loop remaining?\r\n if self.repeat.remaining > 1:\r\n\r\n # Send the player a hudhint with the time remaining\r\n self.send_hudhint(\r\n 'RespawnCountdown_Plural', {'time': self.repeat.remaining})\r\n\r\n # Is there exactly 1 loop remaining?\r\n elif self.repeat.remaining == 1:\r\n\r\n # Send the player a hudhint with the time remaining\r\n self.send_hudhint('RespawnCountdown_Singular')\r\n\r\n # Are there no more loops remaining?\r\n else:\r\n\r\n # Send the player a hudhint that they are being respawned\r\n self.send_hudhint('RespawnCountdown_Ended')\r\n\r\n # Get the remaining time (less than a second) to respawn\r\n remaining = float(gg_dm_respawn_delay) % 1\r\n\r\n # Is there still time remaining?\r\n if remaining:\r\n\r\n # Respawn the player after the remaining time\r\n delayed(remaining, self.check_respawn)\r\n\r\n # Is there no time remaining?\r\n else:\r\n\r\n # Respawn the player immediately\r\n self.gg_player.respawn()\r\n\r\n def stop_repeat(self, delete=False):\r\n '''Stops the repeat and deletes it if needed'''\r\n\r\n # Stop the player's repeat\r\n self.repeat.stop()\r\n\r\n # Does the repeat need deleted?\r\n if delete:\r\n\r\n # Delete the repeat\r\n self.repeat.delete()\r\n\r\n def check_respawn(self):\r\n '''Checks to see if the round is still\r\n active before spawning the player'''\r\n\r\n # Is the round still active?\r\n if ActiveInfo.round:\r\n\r\n # Spawn the player\r\n self.gg_player.respawn()\r\n\r\n @property\r\n def repeat(self):\r\n '''Property used to return the player's Repeat instance'''\r\n\r\n # Does the player have a Repeat instance?\r\n if not hasattr(self, '_repeat'):\r\n\r\n # Create the player's Repeat instance\r\n self._repeat = Repeat(\r\n 'gg_deathmatch_%s' % self.userid, self.count_down)\r\n\r\n # Return the player's Repeat instance\r\n return self._repeat\r\n","sub_path":"cstrike/addons/eventscripts/gungame51/scripts/included/gg_deathmatch/modules/countdown.py","file_name":"countdown.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168635115","text":"from console import *\r\np = process(\"dataSVM.xlsx\", 4, [1, 'linear', 1])\r\nret, plt_raw, plt_Hyper, a = p.start()\r\nprint(ret)\r\nplt_raw.show()\r\nplt_Hyper.show()\r\na.show()\r\n# for key in relatSet:\r\n# \tprint(key,\"=>\",relatSet[key])\r\n# pd.set_option('display.max_rows', 1000)\r\n# print(classInfo)\r\n\r\n# 
plt.show()\r\n","sub_path":"datacode/process/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"638231274","text":"from flask import Flask, Response, jsonify, request, send_file\nimport os, shutil, logging\n\nHOST = '0.0.0.0'\nPORT = 8085\nCURRENT_DIR = os.path.join(os.getcwd(), \"data\")\napp = Flask(__name__)\nlogging.basicConfig(filename='datanode.log', level=logging.DEBUG)\n\n\n@app.route(\"/ping\")\ndef ping():\n return Response(\"ping from datanode\", 200)\n\n\n@app.route(\"/format\")\ndef format():\n '''\n Formats the contents of the datanode\n '''\n global CURRENT_DIR\n\n # create root folder if it does not exist\n if not os.path.exists(CURRENT_DIR):\n os.mkdir(CURRENT_DIR)\n\n # iterate through all files and dirs and delete\n for filename in os.listdir(CURRENT_DIR):\n path = os.path.join(CURRENT_DIR, filename)\n try:\n if os.path.isfile(path) or os.path.islink(path):\n os.unlink(path)\n elif os.path.isdir(path):\n shutil.rmtree(path)\n except Exception as e:\n # if file/dir was not deleted write to log\n app.logger.info(f'failed to delete {path}, reason: {e}')\n\n # obtain info about free space\n _, _, free = shutil.disk_usage(CURRENT_DIR)\n\n return jsonify({\"free\": free})\n\n\n@app.route(\"/get\", methods=['GET'])\ndef get_file():\n '''\n Get file from datanode\n '''\n\n print(\"started transmitting file for get_file\")\n file_id = request.json['file_id']\n\n if os.path.isfile(os.path.join(CURRENT_DIR, str(file_id))):\n print(\"file found, sending\")\n return send_file(os.path.join(CURRENT_DIR, str(file_id)))\n else:\n print(\"file is not found\")\n return Response(\"file doesn't exist in this node\", 404)\n\n\n@app.route(\"/put\", methods=['POST'])\ndef put_file():\n '''\n Put file to datanode\n '''\n\n print(\"started uploading file\")\n # obtain file from client\n file_id = [k for k in request.files.keys()][0]\n file = request.files[f'{file_id}']\n try:\n # create file\n print(f\"file: {file}\")\n print(f\"file id: {file_id}\")\n file.save(os.path.join(CURRENT_DIR, str(file_id)))\n return Response(\"\", 200)\n except Exception as e:\n # if not created append to log, response 400\n app.logger.info(f\"failed to upload file because of {e}\")\n return Response(\"\", 400)\n\n\n@app.route(\"/create\", methods=[\"POST\"])\ndef create_file():\n '''\n Creates an empty file in the current directory\n '''\n # obtain file id from client\n print(\"started creating file\")\n print(request.json)\n file_id = request.json['file_id']\n try:\n # create file\n print(file_id)\n open(CURRENT_DIR + '/' + str(file_id), 'a').close()\n return Response(\"\", 200)\n except Exception as e:\n # if not created append to log, response 400\n app.logger.info(f\"failed to create file because of {e}\")\n return Response(\"\", 400)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host=HOST, port=PORT)\n","sub_path":"datanodes.py","file_name":"datanodes.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153481914","text":"\r\n# coding: utf-8\r\n\r\n# In[29]:\r\n\r\n\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\n# In[30]:\r\n\r\n\r\nimport os\r\n\r\n#scrips_folder=r\"C:\\Users\\langh\\Dropbox\\MCC-I Masters\\Thesis\\A17\\1. 
Training\\scripts\"\r\n\r\npred_batch_folder=os.path.join(scrips_folder, r'pred_batch')\r\ntrue_batch_folder=os.path.join(scrips_folder, r'true_batch')\r\noverlay_folder=os.path.join(scrips_folder, r'overlays')\r\nfalse_negatives=os.path.join(scrips_folder, r'false_negatives')\r\nfalse_positives=os.path.join(scrips_folder, r'false_positives')\r\ntrue_negatives=os.path.join(scrips_folder, r'true_negatives')\r\ntrue_positives=os.path.join(scrips_folder, r'true_positives')\r\n\r\npascal_folder=r'W:\\Dropbox\\MCC-I Masters\\Thesis\\A17\\1. Training\\database\\PASCAL_format_train300'\r\nSegmentationClass_folder=os.path.join(pascal_folder, r'SegmentationClass')\r\n\r\nimages_folder=os.path.join(pascal_folder,'JPEGImages')\r\nimages_folder=r\"C:\\Users\\nhurst\\Desktop\\single\\images\"\r\n\r\n\r\n# In[31]:\r\n\r\n\r\nget_ipython().run_line_magic('run', 'evaluate.py')\r\n\r\n\r\n# In[32]:\r\n\r\n\r\nimport os\r\nfrom shutil import copyfile, rmtree\r\n\r\nfor f in os.listdir(pred_batch_folder):\r\n    copyfile(os.path.join(SegmentationClass_folder,f), os.path.join(true_batch_folder,f))\r\n\r\n\r\n# In[33]:\r\n\r\n\r\nfrom scipy import misc\r\nimport os\r\nimport numpy as np\r\n\r\ny_true_batch=[]\r\ny_pred_batch=[]\r\n\r\ntrue_batch_folder_files = [f for f in os.listdir(true_batch_folder) if os.path.isfile(os.path.join(true_batch_folder,f))]\r\nfor f in true_batch_folder_files:\r\n    true_mask = misc.imread(os.path.join(true_batch_folder,f), mode='L')\r\n    y_true_batch.append(true_mask)\r\n    pred_mask = misc.imread(os.path.join(pred_batch_folder,f), mode='L')\r\n    y_pred_batch.append(pred_mask)\r\n    if(len(pred_mask)!=len(true_mask)):\r\n        print(\"{},{},{}\".format(f,len(pred_mask),len(true_mask)))\r\n\r\nresult, results1,results2 = computeIoU(y_true_batch, y_pred_batch)\r\n\r\nimport pandas as pd\r\nprint(len(true_batch_folder_files))\r\nprint(result)\r\n#print(np.average(results[np.where(results != -1)]))\r\n\r\ndf = pd.DataFrame({'file': true_batch_folder_files, 'independent_result': results1[np.where(results1 != -1)], 'dlib_result': results2[np.where(results2 != -1)]})\r\ndf.sort_values(by='independent_result', ascending=True, inplace=True)\r\ndf\r\n\r\n\r\n# In[34]:\r\n\r\n\r\ndf[df['file']=='104101927.png']\r\n\r\n\r\n# In[36]:\r\n\r\n\r\ndf.to_csv(os.path.join(scrips_folder,\"results.csv\"), sep=',', encoding='utf-8')\r\n\r\n\r\n# In[37]:\r\n\r\n\r\nimport base64\r\n\r\npd.set_option('display.max_colwidth', -1)\r\n\r\ndef get_thumbnail(path):\r\n    image = None\r\n    if(path is not None):\r\n        image = open(path, 'rb')\r\n    return image\r\n\r\ndef image_base64(image):\r\n    if isinstance(image, str):\r\n        image = get_thumbnail(image)\r\n    image_read = image.read()\r\n    image.close()\r\n    image_64_encode = base64.encodestring(image_read)\r\n    data = image_64_encode.decode('ascii')\r\n    return data\r\n    \r\ndef svg_image_formatter(im):\r\n    return f'<img src=\"data:image/svg+xml;base64,{image_base64(im)}\">'\r\n\r\ndef jpeg_image_formatter(im):\r\n    return f'<img src=\"data:image/jpeg;base64,{image_base64(im)}\">'\r\n\r\ndef png_image_formatter(im):\r\n    return f'<img src=\"data:image/png;base64,{image_base64(im)}\">'\r\n\r\n\r\n# In[40]:\r\n\r\n\r\nimport cv2\r\n\r\nimages = [f.replace(\".png\",\"\") for f in os.listdir(true_batch_folder) if f.endswith(\".png\")]\r\n\r\nalpha=0.8\r\n\r\nfor image in images:\r\n    output=cv2.imread(os.path.join(images_folder, image + \".png\"))\r\n    groud_truth=cv2.imread(os.path.join(true_batch_folder, image + \".png\"))\r\n    overlay=cv2.imread(os.path.join(pred_batch_folder, image + \".png\"))\r\n    overlay = cv2.cvtColor(overlay,cv2.COLOR_BGR2RGB)\r\n    overlay[np.where(((overlay!=[255,255,255]) 
& (groud_truth==[255,255,255])).all(axis=2))] = [0,0,255]\r\n overlay[np.where(((groud_truth==[255,255,255]) & (overlay==[255,255,255])).all(axis=2))] = [0,255,0]\r\n overlay[np.where((overlay==[255,255,255]).all(axis=2))] = [255,0,0]\r\n overlay = cv2.bitwise_or(output, overlay)\r\n cv2.addWeighted(overlay, alpha, output, 1 - alpha, 0, output)\r\n cv2.imwrite(os.path.join(overlay_folder, image + \".png\"), output)\r\n\r\n\r\n# In[45]:\r\n\r\n\r\nimport pdfkit\r\nfrom IPython.display import HTML\r\n\r\ndf[\"JPEGImage\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(images_folder,row['file'])), axis=1)\r\ndf[\"ground_truth\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(true_batch_folder,row['file'])), axis=1)\r\ndf[\"prediction\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(pred_batch_folder,row['file'])), axis=1)\r\ndf[\"overlay\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(overlay_folder,row['file'])), axis=1)\r\n\r\ndf.sort_values(by='independent_result', ascending=True, inplace=True)\r\nhtml_string=df[['file','independent_result','dlib_result','JPEGImage','ground_truth','prediction','overlay']].to_html(formatters={'JPEGImage': png_image_formatter, 'ground_truth': png_image_formatter, 'prediction': png_image_formatter, 'overlay': png_image_formatter}, escape=False)\r\nHTML(html_string)\r\n\r\nf=open(os.path.join(scrips_folder,\"icons.html\"), \"w+\")\r\nf.write(html_string)\r\n\r\npdfkit.from_string(html_string,os.path.join(scrips_folder,'results.pdf'))\r\n\r\n\r\n# In[44]:\r\n\r\n\r\nimport pdfkit\r\nfrom IPython.display import HTML\r\n\r\ndf[\"JPEGImage\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(images_folder,row['file'].replace('.png','.jpg'))), axis=1)\r\ndf[\"ground_truth\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(true_batch_folder,row['file'])), axis=1)\r\ndf[\"prediction\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(pred_batch_folder,row['file'])), axis=1)\r\ndf[\"overlay\"]=df[['file']].apply(lambda row: get_thumbnail(os.path.join(overlay_folder,row['file'])), axis=1)\r\n\r\ndf.sort_values(by='independent_result', ascending=True, inplace=True)\r\nhtml_string=df[['file','independent_result','dlib_result','JPEGImage','ground_truth','prediction','overlay']][:20].to_html(formatters={'JPEGImage': jpeg_image_formatter, 'ground_truth': png_image_formatter, 'prediction': png_image_formatter, 'overlay': png_image_formatter}, escape=False)\r\nHTML(html_string)\r\n\r\nf=open(os.path.join(scrips_folder,\"icons.html\"), \"w+\")\r\nf.write(html_string)\r\n\r\npdfkit.from_string(html_string,os.path.join(scrips_folder,'bad_results.pdf'))\r\n\r\ndf.sort_values(by='independent_result', ascending=False, inplace=True)\r\nhtml_string=df[['file','independent_result','dlib_result','JPEGImage','ground_truth','prediction','overlay']][:20].to_html(formatters={'JPEGImage': jpeg_image_formatter, 'ground_truth': png_image_formatter, 'prediction': png_image_formatter, 'overlay': png_image_formatter}, escape=False)\r\nHTML(html_string)\r\n\r\nf=open(os.path.join(scrips_folder,\"icons.html\"), \"w+\")\r\nf.write(html_string)\r\n\r\npdfkit.from_string(html_string,os.path.join(scrips_folder,'good_results.pdf'))\r\n\r\n","sub_path":"1. Satellite/Scripts/4. evaluation/3. evaluation.py","file_name":"3. 
evaluation.py","file_ext":"py","file_size_in_byte":7006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"471594818","text":"import xml.etree.ElementTree\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\nframelist = []\nxPosList = []\nindicatedXlist = []\nindicatedYlist = []\nXListQueue = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\nYListQueue = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\ni = 0\nsens = 30\n\n# Choose the XML data file to process\npath = sys.argv[1]\ne = xml.etree.ElementTree.parse(path).getroot()\n\ndef add_to_list(a, b, c, d):\n\n framelist.append(a)\n xPosList.append(b)\n indicatedXlist.append(c)\n indicatedYlist.append(d)\n\nfor child in e[0]:\n\n frame = (int(child.attrib['key']))\n xPos = (float(child.find('xPos').text))\n indicatedX = (int(child.find('IndicatedValues').find('IndicatedX').text))\n indicatedY = (500 - int(child.find('IndicatedValues').find('IndicatedY').text))\n XListQueue.pop(0)\n XListQueue.append(float(indicatedX))\n YListQueue.pop(0)\n YListQueue.append(float(indicatedY))\n\n if i < 10:\n\n add_to_list(frame, xPos, indicatedX, indicatedY)\n\n elif sum(XListQueue)/len(XListQueue) - sens <= indicatedX <= sum(XListQueue)/len(XListQueue) + sens \\\n and sum(YListQueue)/len(YListQueue) - sens <= indicatedY <= sum(YListQueue)/len(YListQueue) + sens:\n\n add_to_list(frame, xPos, indicatedX, indicatedY)\n\n i += 1\n\n\n# Create the figures and plots the data\nfig = plt.figure(figsize=(13, 13))\n\n#ax1 = fig.add_subplot(3,1,1)\n#ax1.set_xlabel('X Coordinate')\n#ax1.set_ylabel('Y Coordinate')\n#ax1.plot(indicatedXlist, indicatedYlist, 'ro')\n#ax1.set_xlim([0,600])\n#ax1.set_ylim([0,500])\n\n#ax2 = fig.add_subplot(3,1,2)\n#ax2.set_xlabel('X Coordinate')\n#ax2.set_ylabel('Input Coordinate')\n#ax2.plot(indicatedXlist, xPosList, 'g+')\n#ax2.set_xlim([0,600])\n#ax2.set_ylim([0,5])\n\n#ax3 = fig.add_subplot(3,1,3)\n#ax3.set_xlabel('Y Coordinate')\n#ax3.set_ylabel('Input Coordinate')\n#ax3.plot(indicatedYlist, xPosList, 'b+')\n#ax3.set_xlim([0,500])\n#ax3.set_ylim([0,5])\n\nax2 = fig.add_subplot(1,1,1, projection='3d')\nax2.set_title('Digit Movement Against Input Coordinate')\nax2.set_xlabel('X Coordinate')\nax2.set_ylabel('Y Coordinate')\nax2.set_zlabel('Input Coordinate')\nax2.plot(indicatedXlist, indicatedYlist, 'r+', zdir='z', zs=0)\nax2.plot(indicatedXlist, xPosList, 'g+', zdir='y', zs=500)\nax2.plot(indicatedYlist, xPosList, 'b+', zdir='x', zs=0)\nax2.scatter(indicatedXlist, indicatedYlist, xPosList, c='r', marker='o')\n\nax2.set_xlim([0,600])\nax2.set_ylim([0,500])\nax2.set_zlim([0,5])\n\nplt.show()","sub_path":"accuracy_analysis.py","file_name":"accuracy_analysis.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"423379629","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef create_circle(r, x_offset=0, y_offset=0):\n \"\"\"\n Plots a circle with radius r centered at the point (x_offset, y_offset)\n \n Args:\n r (double): Radius of the circle, normalized to the width and height of the plot\n x_offset (double): x-coordinate of the circle's center, normalized to the width of the plot\n y_offset (double): y-coordinate of the circle's center, normalized to the height of the plot\n \n Returns:\n None\n \"\"\"\n x = np.linspace(-1, 1, 1000)\n y = np.linspace(-1, 1, 1000)\n \n [X, Y] = np.meshgrid(x, y)\n circle = 
((X-x_offset)**2 + (Y-y_offset)**2) < r**2\n \n plt.figure()\n plt.imshow(circle, origin='lower')\n\n\ndef create_ellipse(a, b, x_offset=0, y_offset=0, orientation='horizontal'):\n \"\"\"\n Plots an ellipse with semi-major axis a and semi-minor axis b, centered at the point (x_offset, y_offset)\n \n Args:\n a (double): Semi-major axis of the ellipse, normalized to the width of the plot\n b (double): Semi-minor axis of the ellipse, normalized to the height of the plot\n x_offset (double): x-coordinate of the ellipse's center, normalized to the width of the plot\n y_offset (double): y-coordinate of the ellipse's center, normalized to the height of the plot\n orientation (string): Defines whether the semi-major axis lies along the x-axis (horizontal) or the y-axis (vertical)\n \n Returns:\n None\n \"\"\"\n \n #The semi-major axis must always be larger than the semi-minor axis\n #Gracefully handles case where user accidentally passes a and b in the wrong order\n if b > a:\n a, b = b, a\n \n x = np.linspace(-1, 1, 1000)\n y = np.linspace(-1, 1, 1000)\n \n if orientation == 'horizontal':\n # Semi-major axis is in x-direction, giving the impression of a \"horizontal\" ellipse\n rx = a\n ry = b\n elif orientation == 'vertical':\n # Semi-major axis is in y-direction, giving the impression of a \"vertical\" ellipse\n rx = b\n ry = a\n else:\n raise ValueError(\"orientation must be 'horizontal' or 'vertical'\")\n \n [X, Y] = np.meshgrid(x,y)\n ellipse = (((X-x_offset)/rx)**2 + ((Y-y_offset)/ry)**2) < 1\n \n plt.figure()\n plt.imshow(ellipse, origin='lower')\n \n \ndef create_rectangle(width, height):\n \"\"\"\n Creates a rectangle in the center of the plot.\n \n Args:\n width (double): Width of the rectangle, normalized to the width of the plot\n height (double): Height of the rectangle, normalized to the height of the plot\n \"\"\"\n w = int(width*1000)\n h = int(height*1000)\n \n grid = np.zeros([1000, 1000])\n \n x = np.size(grid, 1)\n y = np.size(grid, 0)\n \n grid[(y - h)//2:(y + h)//2, (x - w)//2:(x + w)//2] = 1\n \n plt.figure()\n plt.imshow(grid, origin='lower')\n\n \ndef create_formed_half_space(x, y, function, inverted=False):\n \"\"\"\n Defines a boundary separating the plane into two sections along the y-axis, and fills only one side of the boundary.\n \n Args:\n x (ndarray): Numpy array of points along the x-axis\n y (ndarray): Numpy array of points along the y-axis\n function (function): A function object, which will operate on x to obtain a boundary\n inverted (bool): False by default, wherein all elements below the boundary are set to 1. 
If inverted is True, all elements above the boundary are set to 1.\n \n Returns:\n None\n \"\"\"\n [X, Y] = np.meshgrid(x, y)\n \n f = function(x)\n half_space = f > Y\n \n if inverted:\n half_space = ~half_space\n \n plt.figure()\n plt.imshow(half_space, origin='lower')\n \n\ndef line(m, b):\n \"\"\"\n Returns a lambda expression for the equation of a line, with slope m and intercept b.\n\n Args:\n m (double): The slope of the line\n b (double): The intercept of the line\n\n Returns:\n A lambda expression\n \"\"\"\n return lambda x: m*x + b\n\n\nif __name__ == '__main__':\n create_circle(0.6)\n create_circle(0.3, x_offset=0.2, y_offset=0.4)\n \n create_ellipse(0.7, 0.3)\n create_ellipse(0.9, 0.1, orientation='vertical')\n create_ellipse(0.5, 0.5, x_offset=-0.3)\n \n create_rectangle(0.4, 0.8)\n create_rectangle(1, 0.3)\n \n x = np.linspace(0, 2*np.pi, 1000)\n y = np.linspace(-2, 2, 1000)\n \n create_formed_half_space(x, y, np.sin)\n \n x = np.linspace(0, 10, 1000)\n y = np.linspace(0, 10, 1000)\n \n m = -2\n b = 7\n function = line(m, b)\n \n create_formed_half_space(x, y, function, inverted=True)","sub_path":"geometries.py","file_name":"geometries.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"283347328","text":"#coding=utf-8\nfrom flask import Flask,render_template,redirect,url_for,request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField,SubmitField\nfrom wtforms.validators import DataRequired\n\n\napp = Flask(__name__)\n\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:mysql@127.0.0.1:3306/temp'\napp.config['SECRET_KEY'] = 'python'\n\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True\n\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n\n\ndb = SQLAlchemy(app)\n\n# Model class - author\nclass Author(db.Model):\n __tablename__ = 'author'\n id = db.Column(db.Integer,primary_key=True)\n name = db.Column(db.String(32),unique=True)\n email = db.Column(db.String(64))\n au_book = db.relationship('Book',backref='author')\n def __str__(self):\n return 'Author:%s' %self.name\n\n# Model class - book\nclass Book(db.Model):\n __tablename__ = 'books'\n id = db.Column(db.Integer,primary_key=True)\n info = db.Column(db.String(32),unique=True)\n leader = db.Column(db.String(32))\n au_book = db.Column(db.Integer,db.ForeignKey('author.id'))\n def __str__(self):\n return 'Book:%s,%s'%(self.info,self.leader)\n\n\n# Form class used to add new records\nclass Append(FlaskForm):\n au_info = StringField(validators=[DataRequired()])\n bk_info = StringField(validators=[DataRequired()])\n submit = SubmitField(u'Add')\n\n\n@app.route('/',methods=['GET','POST'])\ndef index():\n # Query all authors and books\n author = Author.query.all()\n book = Book.query.all()\n # Create the form object\n form = Append()\n if form.validate_on_submit():\n # Read the form input\n wtf_au = form.au_info.data\n wtf_bk = form.bk_info.data\n # Store the form data in model instances\n db_au = Author(name=wtf_au)\n db_bk = Book(info=wtf_bk)\n # Commit the session\n db.session.add_all([db_au,db_bk])\n db.session.commit()\n # Query authors and books again after the insert\n author = Author.query.all()\n book = Book.query.all()\n return render_template('index.html',author=author,book=book,form=form)\n else:\n # validate_on_submit() is False for plain GET requests and for invalid submissions; render the page in both cases\n return render_template('index.html',author=author,book=book,form=form)\n\n# Delete an author\n@app.route('/delete_author/<int:id>')\ndef delete_author(id):\n # Look up the author to delete by id\n au = Author.query.filter_by(id=id).first()\n db.session.delete(au)\n 
db.session.commit()\n # Redirect straight back to the index view\n return redirect(url_for('index'))\n\n# Delete a book\n@app.route('/delete_book/<int:id>')\ndef delete_book(id):\n # Look up the book to delete by id\n bk = Book.query.filter_by(id=id).first()\n db.session.delete(bk)\n db.session.commit()\n # Redirect straight back to the index view\n return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n db.drop_all()\n db.create_all()\n # Seed data\n au_xi = Author(name='我吃西红柿',email='xihongshi@163.com')\n au_qian = Author(name='萧潜',email='xiaoqian@126.com')\n au_san = Author(name='唐家三少',email='sanshao@163.com')\n bk_xi = Book(info='吞噬星空',leader='罗峰')\n bk_xi2 = Book(info='寸芒',leader='李杨')\n bk_qian = Book(info='飘渺之旅',leader='李强')\n bk_san = Book(info='冰火魔厨',leader='融念冰')\n # Add the seed data to the session\n db.session.add_all([au_xi,au_qian,au_san,bk_xi,bk_xi2,bk_qian,bk_san])\n # Commit the session\n db.session.commit()\n app.run(debug=True)","sub_path":"test/Flask/04/python24.py","file_name":"python24.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"540490410","text":"from vector import *\n\nclass Light(object):\n\n def __init__( self, location, color ):\n self.location = location\n self.color = color\n\n @staticmethod\n def ambient_color( ambient_light, reflection ):\n return [ ambient_light[0]*reflection[0],\n ambient_light[1]*reflection[1],\n ambient_light[2]*reflection[2] ]\n\n def diffuse_color( self, reflection, normal ):\n normal_mul = Vector.dot(self.location, normal)\n if ( normal_mul < 0 ):\n normal_mul = 0\n return [ int(self.color[0] * reflection[0] * normal_mul),\n int(self.color[1] * reflection[1] * normal_mul),\n int(self.color[2] * reflection[2] * normal_mul) ]\n\n def specular_color( self, reflection, view, normal, specular_exp=8 ):\n normal_mul = 2 * Vector.dot(self.location, normal)\n v = Vector([ (normal[0] * normal_mul) - self.location[0],\n (normal[1] * normal_mul) - self.location[1],\n (normal[2] * normal_mul) - self.location[2] ])\n view_mul = Vector.dot(v, view)\n if ( view_mul < 0 ):\n view_mul = 0\n view_mul = view_mul ** specular_exp\n return [ int(self.color[0] * reflection[0] * view_mul),\n int(self.color[1] * reflection[1] * view_mul),\n int(self.color[2] * reflection[2] * view_mul) ]\n","sub_path":"light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"543080898","text":"\"\"\"Tests for Ralph utils.\"\"\"\n\nimport pytest\nfrom pydantic import BaseModel\n\nfrom ralph import utils as ralph_utils\nfrom ralph.conf import InstantiableSettingsItem, settings\n\n\ndef test_utils_import_string():\n \"\"\"Tests import_string utility taken from Django utilities.\"\"\"\n with pytest.raises(ImportError, match=\"foo doesn't look like a module path\"):\n ralph_utils.import_string(\"foo\")\n\n with pytest.raises(\n ImportError, match='Module \"requests\" does not define a \"foo\" attribute/class'\n ):\n ralph_utils.import_string(\"requests.foo\")\n\n http_status = ralph_utils.import_string(\"http.HTTPStatus\")\n assert http_status.OK == 200\n\n\ndef test_utils_get_backend_type():\n \"\"\"Tests get_backend_type utility.\"\"\"\n assert (\n ralph_utils.get_backend_type(settings.BACKENDS, \"es\")\n == settings.BACKENDS.DATABASE\n )\n assert (\n ralph_utils.get_backend_type(settings.BACKENDS, \"ldp\")\n == settings.BACKENDS.STORAGE\n )\n assert (\n ralph_utils.get_backend_type(settings.BACKENDS, \"ws\")\n == settings.BACKENDS.STREAM\n )\n assert 
ralph_utils.get_backend_type(settings.BACKENDS, \"foo\") is None\n\n\n@pytest.mark.parametrize(\n \"options,expected\",\n [\n # Empty options should produce default result.\n ({}, {}),\n # Options not matching the backend name are ignored.\n ({\"foo\": \"bar\", \"not_dummy_foo\": \"baz\"}, {}),\n # One option matches the backend name and overrides the default.\n ({\"dummy_foo\": \"bar\", \"not_dummy_foo\": \"baz\"}, {\"foo\": \"bar\"}),\n ],\n)\ndef test_utils_get_backend_instance(options, expected):\n \"\"\"Tests get_backend_instance utility should return the expected result.\"\"\"\n\n class DummyBackendSettings(InstantiableSettingsItem):\n \"\"\"Represents a dummy backend setting.\"\"\"\n\n foo: str = \"foo\" # pylint: disable=disallowed-name\n\n def get_instance(self, **init_parameters): # pylint: disable=no-self-use\n \"\"\"Returns the init_parameters.\"\"\"\n return init_parameters\n\n class TestBackendType(BaseModel):\n \"\"\"A backend type including the DummyBackendSettings.\"\"\"\n\n DUMMY: DummyBackendSettings = DummyBackendSettings()\n\n backend_instance = ralph_utils.get_backend_instance(\n TestBackendType(), \"dummy\", options\n )\n assert isinstance(backend_instance, dict)\n assert backend_instance == expected\n\n\n@pytest.mark.parametrize(\"path,value\", [([\"foo\", \"bar\"], \"bar_value\")])\ndef test_utils_get_dict_value_from_path_should_return_given_value(path, value):\n \"\"\"Tests the get_dict_value_from_path function should return the value when it's\n present.\n \"\"\"\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) == value\n\n\n@pytest.mark.parametrize(\n \"path\",\n [\n [\"foo\", \"bar\", \"baz\", \"qux\"],\n [\"foo\", \"not_bar\"],\n [\"not_foo\", \"bar\"],\n None,\n ],\n)\ndef test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n \"\"\"Tests the get_dict_value_from_path function should return None if the value is\n not found.\n \"\"\"\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None\n\n\ndef test_utils_set_dict_value_from_path_creating_new_fields():\n \"\"\"Tests when the fields are not present, set_dict_value_from_path should add\n them.\n \"\"\"\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}\n\n\ndef test_utils_set_dict_value_from_path_updating_fields():\n \"\"\"Tests when the fields are present, set_dict_value_from_path should update\n them.\n \"\"\"\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"203730821","text":"\nimport sys\nfrom collections import defaultdict\n\nimport numpy\n\n# from six import PY3, iteritems, iterkeys, itervalues, string_types\nif sys.version_info[0] >= 3:\n unicode = str\n\nimport sqlite3\nimport os\nimport datetime\nimport csv\n\nimport processing\n# from config import config_processing\n\nfrom sklearn.preprocessing import OneHotEncoder\n\n\nclass Dictionary:\n\n def __init__(self, db_file, config_processing):\n self.config_processing = config_processing\n self.db_file = db_file\n self.occ_threshold = 0\n 
self.corpus = Corpus_db(self)\n\n @property\n def dfs(self):\n dfs = {}\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n if self.occ_threshold == 0:\n c.execute(\"SELECT * FROM global_dict\")\n c2 = conn.cursor()\n for row in c:\n c2.execute(\"SELECT count(*) FROM occurrences WHERE word_id=\" + str(row[0]))\n try:\n dfs[row[0] - 1] = c2.fetchone()[0]\n except:\n pass\n else:\n c.execute(\"SELECT global_dict.word_id, word_id_index_\" + str(self.occ_threshold)\n + \".word_id_out FROM global_dict INNER JOIN word_id_index_\" + str(self.occ_threshold)\n + \" ON global_dict.word_id = word_id_index_\" + str(self.occ_threshold) + \".word_id\")\n c2 = conn.cursor()\n for row in c:\n c2.execute(\"SELECT count(*) FROM occurrences WHERE word_id=\" + str(row[0]))\n try:\n dfs[row[1] - 1] = c2.fetchone()[0] # row[1] is word_id_out\n except:\n pass\n\n conn.close() # TODO try double join instead of loop\n return dfs\n\n @property\n def num_docs(self):\n docno = -1\n\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n if self.occ_threshold == 0:\n c.execute(\"SELECT count(*) FROM reddit_comments\")\n else:\n c.execute(\"SELECT count(*) FROM (\"\n + \"SELECT DISTINCT occurrences.comment_id \"\n + \"FROM occurrences INNER JOIN word_id_index_\" + str(self.occ_threshold)\n + \" ON occurrences.word_id = word_id_index_\" + str(self.occ_threshold) + \".word_id)\")\n # count of documents that contain at least one word listed in word_id_index\n try:\n docno = c.fetchone()[0]\n except:\n pass\n\n\n conn.close()\n return docno + 1\n\n @property\n def num_nnz(self):\n numnnz = 0\n\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n if self.occ_threshold == 0:\n c.execute(\"SELECT count(*) FROM occurrences\")\n else:\n c.execute(\"SELECT count(*) FROM occurrences INNER JOIN word_id_index_\" + str(self.occ_threshold)\n + \" ON occurrences.word_id = word_id_index_\" + str(self.occ_threshold) + \".word_id\")\n try:\n numnnz = c.fetchone()[0]\n except:\n pass\n\n conn.close()\n return numnnz\n\n # def doc2bow(self, document): # left for compatibility, not in use, NO occ_threshold support!\n # if isinstance(document, string_types):\n # raise TypeError(\"doc2bow expects an array of unicode tokens on input, not a single string\")\n #\n # # Construct (word, frequency) mapping.\n # counter = defaultdict(int)\n # for w in document:\n # counter[w if isinstance(w, unicode) else unicode(w, 'utf-8')] += 1\n #\n # conn = sqlite3.connect(self.db_file)\n # c = conn.cursor()\n #\n # # for w in counter.keys():\n # # c.execute(\"SELECT * FROM global_dict WHERE word=\"+ w)\n #\n # c.execute(\"SELECT * FROM global_dict\") # TODO optimize: loop counter first and make separate queries for id (row[0]) of each term\n #\n # result = []\n # for row in c:\n # if row[1] in counter:\n # result.append((row[0]-1, counter[row[1]]))\n # conn.close()\n # return result\n\n def processed_doc2bow(self, processed_doc, threshold=0):\n # Construct (word, frequency) mapping.\n counter = defaultdict(int)\n for w in processed_doc:\n word = (w[0] if isinstance(w[0], unicode) else unicode(w[0], 'utf-8'),\n w[1] if isinstance(w[1], unicode) else unicode(w[1], 'utf-8'))\n counter[word] += 1\n\n result = []\n\n if self.occ_threshold == 0:\n query_str = \"SELECT word_id, global_occurrences FROM global_dict WHERE word='\"\n else:\n query_str = \"SELECT word_id_index_\" + str(self.occ_threshold) \\\n + \".word_id_out, global_dict.global_occurrences FROM global_dict INNER JOIN word_id_index_\" \\\n + str(self.occ_threshold) + \" ON 
global_dict.word_id = word_id_index_\" + str(self.occ_threshold) \\\n + \".word_id WHERE word='\"\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n for w in counter.keys():\n c.execute(query_str + w[0] +\"' AND type='\" + w[1] + \"'\")\n word_id = None\n occ = 0\n for row in c:\n word_id = row[0]-1\n occ = row[1]\n if word_id and occ >= threshold:\n result.append((word_id, counter[w]))\n conn.close()\n return result\n\n def add_csv_data(self, csv_file_path):\n self._init_db()\n if os.path.isfile(csv_file_path):\n self.add_single_csv(csv_file_path)\n else:\n # isdir\n filenames = os.listdir(csv_file_path)\n for f in filenames:\n if f.endswith(\".csv\"): # assume all .csv files in the folder have appropriate format\n self.add_single_csv(os.path.join(csv_file_path, f))\n\n def _init_db(self):\n table_name = 'reddit_comments'\n if os.path.exists(self.db_file):\n os.remove(self.db_file) # drop the entire db\n\n if not os.path.isfile(self.db_file):\n directory = '/'.join(os.path.split(self.db_file)[0:-1])\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n c.execute(\"PRAGMA foreign_keys = ON\")\n\n # c.execute(\n # 'CREATE TABLE {tn} (comment_id INTEGER PRIMARY KEY, time TIMESTAMP(14), username TEXT, comment TEXT, tag TEXT)' # , PRIMARY KEY (id)\n # .format(tn=table_name))\n\n c.execute('CREATE TABLE {tn} (comment_id INTEGER PRIMARY KEY, time TIMESTAMP(14), comment TEXT, username_id INTEGER, tag_id INTEGER, FOREIGN KEY (tag_id) REFERENCES tags (tag_id), FOREIGN KEY (username_id) REFERENCES usernames (username_id))'\n .format(tn=table_name))\n\n c.execute(\n 'CREATE TABLE global_dict (word_id INTEGER PRIMARY KEY, word TEXT, type TEXT, global_occurrences INTEGER, UNIQUE (word, type))' # UNIQUE (word, type) # TODO set both\n .format(tn=table_name))\n\n c.execute(\n 'CREATE TABLE occurrences (word_id INTEGER, comment_id INTEGER, occurrences INTEGER, FOREIGN KEY(word_id) REFERENCES global_dict(word_id), FOREIGN KEY(comment_id) REFERENCES {tn}(comment_id), UNIQUE (word_id, comment_id))'\n .format(tn=table_name))\n\n c.execute(\"CREATE TABLE tags (tag_id INTEGER PRIMARY KEY, tag TEXT, UNIQUE (tag))\")\n\n c.execute(\"CREATE TABLE usernames (username_id INTEGER PRIMARY KEY, username TEXT, UNIQUE (username))\")\n\n c.execute(\n 'CREATE TABLE saved_models_tfidf (model_id TEXT, occurrences_threshold INTEGER, num_topics INTEGER, normalize INTEGER)')\n\n c.execute(\n 'CREATE TABLE saved_models_lsi (model_id TEXT, occurrences_threshold INTEGER, num_topics INTEGER, power_iters INTEGER, extra_samples INTEGER)')\n\n c.execute(\n 'CREATE TABLE saved_models_rp (model_id TEXT, occurrences_threshold INTEGER, num_topics INTEGER)')\n\n c.execute(\n 'CREATE TABLE saved_models_lda (model_id TEXT, occurrences_threshold INTEGER, num_topics INTEGER, distributed INTEGER, alpha TEXT, eta REAL)')\n\n c.execute(\n 'CREATE TABLE saved_models_hdp (model_id TEXT, occurrences_threshold INTEGER, num_topics INTEGER, gamma INTEGER, kappa REAL, tau REAL, k REAL, t REAL, eta REAL)')\n\n conn.commit()\n conn.close()\n\n\n def add_single_csv(self, csv_file_path):\n\n table_name = 'reddit_comments'\n\n processor = processing.Processing(**self.config_processing)\n # processor = processing.Processing(delete_punctuation_marks=True, delete_numeral=True, delete_single_words=True, initial_form=True, stop_words=None)\n\n conn = sqlite3.connect(self.db_file)\n\n c = conn.cursor()\n\n c.execute(\"PRAGMA foreign_keys = ON\")\n\n csvfile = open(csv_file_path)\n 
readCSV = csv.reader(csvfile, delimiter=',')\n\n # to_db = [(\n # datetime.datetime.fromtimestamp(int(row[0])).strftime('%Y-%m-%d %H:%M:%S'),\n # row[1].replace(\"'\", \"''\"),\n # row[2].replace(\"'\", \"''\"),\n # row[3].replace(\"'\", \"''\")\n # ) for row in readCSV]\n # c.executemany(\"INSERT INTO \" + table_name + \" (time, username, comment, tag) VALUES (?, ?, ?, ?)\", to_db)\n\n for row in readCSV:\n time_ = datetime.datetime.fromtimestamp(int(row[0])).strftime('%Y-%m-%d %H:%M:%S')\n username_ = row[1].replace(\"'\", \"''\")\n comment_ = row[2].replace(\"'\", \"''\")\n tag_ = row[3].replace(\"'\", \"''\") # assume there are 4 fields in every line\n\n comment_id = None\n tag_id = None\n username_id = None\n\n try:\n c.execute(\"INSERT INTO tags (tag) VALUES ('\" + tag_ + \"')\")\n tag_id = c.lastrowid\n except sqlite3.IntegrityError as err:\n c.execute(\"SELECT tag_id FROM tags WHERE tag='\" + tag_ + \"'\")\n found = [r for r in c]\n if len(found) > 0:\n tag_id = found[0][0]\n else:\n tag_id = None\n\n try:\n c.execute(\"INSERT INTO usernames (username) VALUES ('\" + username_ + \"')\")\n username_id = c.lastrowid\n except sqlite3.IntegrityError as err:\n c.execute(\"SELECT username_id FROM usernames WHERE username='\" + username_ + \"'\")\n found = [r for r in c]\n if len(found) > 0:\n username_id = found[0][0]\n else:\n username_id = None\n\n try:\n c.execute(\"INSERT INTO \" + table_name + \" (time, comment, username_id, tag_id) VALUES ('\" + time_ +\n \"', '\" + comment_ + \"', '\" + str(username_id) + \"', '\" + str(tag_id) + \"')\")\n comment_id = c.lastrowid\n except sqlite3.IntegrityError as err:\n print(\"Error adding comment issued at \" + time_ + \": \" + str(err))\n comment_id = None\n # to process text and insert result\n\n document, words = processor(comment_)\n\n # print(words)\n for w in words:\n # c.execute(\"IF EXISTS (SELECT * FROM global_dict WHERE word='\" + w[0] + \"' AND type='\" + w[1] + \"') \" +\n # \"UPDATE global_dict SET global_occuerrences=global_occuerrences+\" + str(w[2]) +\n # \" WHERE word='\" + w[0] + \"' AND type='\" + w[1] + \"' \" +\n # \"ELSE INSERT INTO global_dict (word, type, global_occuerrences) VALUES ('\" + w[0] + \"', '\" + w[1] + \"', \" + str(w[2]) + \")\")\n # # added to global dictionatyor updated number of occurrences\n try:\n c.execute(\"INSERT INTO global_dict (word, type, global_occurrences) VALUES ('\" + w[0] + \"', '\" +\n w[1] + \"', \" + str(w[2]) + \")\")\n # print(\"- inserted \" + w[0])\n\n except sqlite3.IntegrityError as err1:\n # UNIQUE constraint prevents from adding, trying updating\n try:\n c.execute(\"UPDATE global_dict SET global_occurrences=global_occurrences+\" + str(w[2]) +\n \" WHERE word='\" + w[0] + \"' AND type='\" + w[1] + \"' \")\n # print(\"- updated \" + w[0])\n except sqlite3.IntegrityError as err2:\n print(\"!! failed both to insert and update word.\\n - error message on INSERT: \" + str(err1)\n + \"\\n - error message on UPDATE: \" + str(err2))\n c.execute(\"SELECT * FROM global_dict WHERE word='\" + w[0] + \"' AND type='\" + w[1] + \"'\")\n word_id = None\n try:\n word_id = c.fetchone()[0]\n except:\n print(\"!! failed to select the word \" + w[0] + \", \" + w[1] + \" in 'global_dict' table\")\n if word_id:\n try:\n c.execute(\n \"INSERT INTO occurrences (word_id, comment_id, occurrences) VALUES ('\" + str(\n word_id) + \"', '\" +\n str(comment_id) + \"', \" + str(w[2]) + \")\")\n except sqlite3.IntegrityError as err:\n print(\"!! 
failed to insert record into 'occurrences' table.\\n - error message: \" + str(err))\n\n conn.commit()\n conn.close()\n\n\n def keys(self):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n if self.occ_threshold == 0:\n c.execute(\"SELECT word_id FROM global_dict\")\n else:\n c.execute(\"SELECT word_id_index_\" + str(self.occ_threshold)\n + \".word_id_out FROM global_dict INNER JOIN word_id_index_\" + str(self.occ_threshold)\n + \" ON global_dict.word_id = word_id_index_\" + str(self.occ_threshold) + \".word_id\")\n\n result = [row[0]-1 for row in c]\n conn.close()\n\n return result\n\n def __getitem__(self, item):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n if self.occ_threshold == 0:\n c.execute(\"SELECT word FROM global_dict WHERE word_id=\" + str(item + 1))\n else:\n c.execute(\"SELECT global_dict.word FROM global_dict \"\n + \"INNER JOIN word_id_index_\" + str(self.occ_threshold)\n + \" ON global_dict.word_id = word_id_index_\" + str(self.occ_threshold) + \".word_id \"\n + \"WHERE word_id_index_\" + str(self.occ_threshold) + \".word_id_out=\" + str(item+1))\n\n result = [row[0] for row in c]\n conn.close()\n\n if result:\n return result\n else:\n raise IndexError(\"index not found in db\")\n\n def __len__(self):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n if self.occ_threshold == 0:\n c.execute(\"SELECT count(*) FROM global_dict\")\n else:\n c.execute(\"SELECT count(*) FROM word_id_index_\" + str(self.occ_threshold))\n\n result = [row[0] for row in c]\n conn.close()\n\n if result:\n return result[0]\n else:\n return 0\n\n def init_word_id_index(self, occurrences_threshold):\n\n self.occ_threshold = occurrences_threshold\n\n if not os.path.isfile(self.db_file):\n return # has to be called later\n\n if self.occ_threshold == 0:\n return # no need in index\n\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n # check table, create if not exists\n c.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='word_id_index_\" + str(self.occ_threshold) + \"'\")\n result = [row[0] for row in c]\n if len(result) == 0:\n\n print(\"Start building index for occurrence threshold \" + str(occurrences_threshold))\n c.execute(\n 'CREATE TABLE word_id_index_' + str(self.occ_threshold)\n + ' (word_id_out INTEGER PRIMARY KEY, word_id INTEGER, FOREIGN KEY(word_id) REFERENCES global_dict(word_id))')\n c.execute(\n \"INSERT INTO word_id_index_\" + str(self.occ_threshold)\n + \" (word_id) SELECT global_dict.word_id FROM global_dict WHERE global_dict.global_occurrences >= \"\n + str(occurrences_threshold))\n\n conn.commit()\n print(\" - done\")\n else:\n print(\"Found index table for occurrence threshold \" + str(occurrences_threshold))\n\n conn.close()\n\n def save(self, fname_or_handle):\n print(\"'save' function is not implemented in SQLite-based Dictionary class\")\n pass # no need to save data, that are originally stored in db\n\n def find_model(self, model_type, occurrences_threshold, **kwargs):\n r = None\n params = \"\"\n for p in kwargs:\n if str(kwargs[p]) == \"True\":\n kwargs[p] = 1\n if str(kwargs[p]) == \"False\":\n kwargs[p] = 0\n params = params + p + \"='\" + str(kwargs[p]) + \"' AND \"\n if len(params) > 5:\n params = params[0:-5]\n\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n c.execute(\"SELECT model_id FROM saved_models_\" + model_type.lower() + \" WHERE occurrences_threshold='\"\n + str(occurrences_threshold) + \"' AND \" + params)\n result = [row[0] for row in c]\n\n conn.close()\n\n if len(result) > 
0:\n r = result[0]\n\n else:\n print(\"No parameters found in config\")\n\n return r\n\n def add_model(self, model_type, model_id, occurrences_threshold, **kwargs):\n params = \"\"\n values = \"\"\n for p in kwargs:\n if str(kwargs[p]) == \"True\":\n kwargs[p] = 1\n if str(kwargs[p]) == \"False\":\n kwargs[p] = 0\n params = params + p + \", \"\n values = values + \"'\" + str(kwargs[p]) + \"', \"\n if len(params) > 1:\n params = params[0:-2]\n values = values[0:-2]\n\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n c.execute(\"INSERT INTO saved_models_\" + model_type.lower() + \" (model_id, occurrences_threshold, \" + params\n + \") VALUES ('\" + model_id + \"', '\" + str(occurrences_threshold) + \"', \" + values + \")\")\n\n conn.commit()\n conn.close()\n\n def create_result_table(self, length, model_id):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS '\" + model_id + \"_results'\")\n\n fields = \"\"\n for f in range(length):\n fields += \"value\" + str(f + 1) + \", \"\n c.execute(\"CREATE TABLE '\" + model_id + \"_results' (comment_id INTEGER, \"\n + fields + \"FOREIGN KEY(comment_id) REFERENCES reddit_comments(comment_id))\")\n\n conn.commit()\n conn.close()\n\n def save_result(self, vector, length, model_id, comment_id):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n values = dict(vector)\n fields = \"\"\n for f in range(length):\n try:\n fields = fields + \", \" + str(values[f])\n except KeyError:\n fields = fields + \", null\"\n\n c.execute(\"INSERT INTO '\" + model_id + \"_results' VALUES(\" + str(comment_id) + fields + \")\")\n\n conn.commit()\n conn.close()\n\n\n def create_usarname_tag_onehot_tables(self, model_id, usernames, tags):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n\n c.execute(\"DROP TABLE IF EXISTS '{0}_input_tags'\".format(model_id))\n c.execute(\"CREATE TABLE '{0}_input_tags' (tag_id INTEGER, tag_onehot_index INTEGER, FOREIGN KEY (tag_id) REFERENCES reddit_comments (tag_id))\".format(model_id))\n tag_ids = {}\n onehot_idx = -1\n for el in tags: # add to table\n # c.execute(\"INSERT INTO input_tags (tag_id) VALUES (SELECT tag_id FROM tags WHERE tag = '\" + el + \"')\")\n c.execute(\"SELECT tag_id FROM tags WHERE tag = '\" + el + \"'\")\n found = [r for r in c]\n if len(found) > 0:\n onehot_idx += 1\n c.execute(\"INSERT INTO '{0}_input_tags' (tag_id, tag_onehot_index) VALUES (\".format(model_id) + str(found[0][0]) + \", \" + str(onehot_idx) + \")\")\n tag_ids[found[0][0]] = onehot_idx\n\n c.execute(\"DROP TABLE IF EXISTS '{0}_input_usernames'\".format(model_id))\n c.execute(\"CREATE TABLE '{0}_input_usernames' (username_id INTEGER, username_onehot_index INTEGER, FOREIGN KEY (username_id) REFERENCES reddit_comments (username_id))\".format(model_id))\n username_ids = {}\n onehot_idx = -1\n for el in usernames: # add to table\n # c.execute(\"INSERT INTO input_usernames (username_id) VALUES (SELECT username_id FROM usernames WHERE username = '\" + el + \"')\")\n c.execute(\"SELECT username_id FROM usernames WHERE username = '\" + el + \"'\")\n found = [r for r in c]\n if len(found) > 0:\n onehot_idx += 1\n c.execute(\"INSERT INTO '{0}_input_usernames' (username_id, username_onehot_index) VALUES (\".format(model_id) + str(found[0][0]) + \", \" + str(onehot_idx) + \")\")\n username_ids[found[0][0]] = onehot_idx\n\n enc_tags = OneHotEncoder()\n enc_tags.fit([[x] for x in range(0, len(tag_ids))]) # TODO try without OneHotEncoder\n enc_usernames = OneHotEncoder()\n 
enc_usernames.fit([[x] for x in range(0, len(username_ids))])\n\n c.execute(\"DROP TABLE IF EXISTS '{0}_output_onehot_tag'\".format(model_id))\n c.execute(\"SELECT tag_id FROM '{0}_input_tags'\".format(model_id))\n found = [r for r in c]\n fields_tags_def = \"\"\n fields_tags_usg = \"\"\n for i in found:\n fields_tags_def = fields_tags_def + \", tag_\" + str(tag_ids[i[0]]) + \" REAL\"\n fields_tags_usg = fields_tags_usg + \", tag_\" + str(tag_ids[i[0]])\n c.execute(\"CREATE TABLE '{0}_output_onehot_tag' (comment_id INTEGER\".format(model_id) + fields_tags_def +\n \", FOREIGN KEY (comment_id) REFERENCES reddit_comments (comment_id))\")\n\n\n c.execute(\"DROP TABLE IF EXISTS '{0}_output_onehot_username'\".format(model_id))\n c.execute(\"SELECT username_id FROM '{0}_input_usernames'\".format(model_id))\n found = [r for r in c]\n fields_usernames_def = \"\"\n fields_usernames_usg = \"\"\n for i in found:\n fields_usernames_def = fields_usernames_def + \", username_\" + str(username_ids[i[0]]) + \" REAL\"\n fields_usernames_usg = fields_usernames_usg + \", username_\" + str(username_ids[i[0]])\n c.execute(\"CREATE TABLE '{0}_output_onehot_username' (comment_id INTEGER\".format(model_id) + fields_usernames_def +\n \", FOREIGN KEY (comment_id) REFERENCES reddit_comments (comment_id))\")\n\n\n # select docs using input* tables, compute onehots, insert into output_onehot*\n\n c.execute(\"\"\"SELECT reddit_comments.comment_id, '{0}_input_tags'.tag_onehot_index, '{0}_input_usernames'.username_onehot_index\n FROM (\n (reddit_comments LEFT OUTER JOIN '{0}_input_tags' ON reddit_comments.tag_id = '{0}_input_tags'.tag_id)\n LEFT OUTER JOIN '{0}_input_usernames' ON reddit_comments.username_id = '{0}_input_usernames'.username_id \n )\n WHERE (reddit_comments.tag_id IN (SELECT tag_id FROM '{0}_input_tags')) \n OR (reddit_comments.username_id IN (SELECT username_id FROM '{0}_input_usernames'))\"\"\".format(model_id))\n c2 = conn.cursor()\n for r in c:\n if r[1] is not None:\n vector_tags = enc_tags.transform([[r[1]]]).toarray() # tag_onehot_index of comment\n c2.execute(\"INSERT INTO '{0}_output_onehot_tag' (comment_id\".format(model_id) + fields_tags_usg + \" )\" +\n \" VALUES (\" + str(r[0]) + \", \" + (repr(vector_tags.tolist()))[2:-2] + \")\") # TODO test! This depends on behaviour od repr()\n\n if r[2] is not None:\n vector_usernames = enc_usernames.transform([[r[2]]]).toarray() # username_onehot_index of comment\n c2.execute(\"INSERT INTO '{0}_output_onehot_username' (comment_id\".format(model_id) + fields_usernames_usg + \" )\" +\n \" VALUES (\" + str(r[0]) + \", \" + (repr(vector_usernames.tolist()))[2:-2] + \")\") # TODO test! 
This depends on behaviour of repr()\n\n conn.commit()\n conn.close()\n\n\n # def create_general_onehot_tables(self):\n # conn = sqlite3.connect(self.db_file)\n # c = conn.cursor()\n #\n #\n # # TODO compute onehots\n #\n # conn.commit()\n # conn.close()\n\n def find_coments(self, tag_id, from_date, to_date=None):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = 'AND time < TIMESTAMP({0})'.format(to_date)\n c.execute(\n \"SELECT comment_id, username_id FROM reddit_comments WHERE time >= TIMESTAMP({0}) AND tag_id={1} {2}\"\n .format(from_date, tag_id, addition_command))\n comments = [comment for comment in c]\n conn.close()\n return comments\n\n def get_count_comments_by_tag(self, tag_id, from_date, to_date=None):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date)\n request = \"SELECT count(*) FROM reddit_comments WHERE reddit_comments.time >= '{0}' AND tag_id = {1}{2};\".format(from_date.timestamp(), tag_id, addition_command)\n c.execute(request)\n counts = [result for result in c]\n conn.close()\n return counts[0][0]\n\n def get_average_users_onehot_by_tag(self, model_id, tag_id, from_date, to_date=None):\n average_vector = None\n count_coument = 0\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date)\n request = \"\"\"SELECT '{3}_output_onehot_username'.* \n FROM reddit_comments \n LEFT JOIN '{3}_output_onehot_username' ON reddit_comments.comment_id = '{3}_output_onehot_username'.comment_id \n WHERE reddit_comments.time >= '{0}' AND reddit_comments.tag_id = {1}{2};\"\"\".format(\n from_date, tag_id, addition_command, model_id)\n c.execute(request)\n for coment_onehot in c:\n if coment_onehot[0] is None:\n continue\n count_coument += 1\n curent_onehot = numpy.array(coment_onehot[1:])\n if average_vector is not None:\n average_vector += curent_onehot\n else:\n average_vector = curent_onehot\n if count_coument > 0:\n average_vector /= count_coument\n conn.close()\n return average_vector\n\n def get_average_tag_onehot_by_user(self, model_id, user_id, from_date, to_date=None):\n average_vector = None\n count_coument = 0\n conn = sqlite3.connect(self.db_file, detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = ' AND time < {0}'.format(to_date.timestamp())\n request = \"\"\"SELECT '{3}_output_onehot_tag'.* \n FROM reddit_comments \n LEFT JOIN '{3}_output_onehot_tag' ON reddit_comments.comment_id = '{3}_output_onehot_tag'.comment_id \n WHERE reddit_comments.time >= '{0}' AND reddit_comments.username_id = {1}{2};\"\"\".format(\n from_date.timestamp(), user_id, addition_command, model_id)\n c.execute(request)\n for coment_onehot in c:\n if coment_onehot[0] is None:\n continue\n count_coument += 1\n curent_onehot = numpy.array(coment_onehot[1:])\n if average_vector is not None:\n average_vector += curent_onehot\n else:\n average_vector = curent_onehot\n if count_coument > 0:\n average_vector /= count_coument\n conn.close()\n return average_vector\n\n def get_last_coment_by_tag(self, tag_id, to_date=None):\n conn = sqlite3.connect(self.db_file, detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date.timestamp())\n request = 'SELECT time FROM reddit_comments WHERE tag_id = {0} {1} ORDER BY 
-time LIMIT 1'.format(tag_id, addition_command)\n c.execute(request)\n result = [r for r in c]\n conn.close()\n if result:\n return result[0]\n else:\n return None\n\n def get_last_coment_by_user(self, user_id, tag_id, to_date=None):\n conn = sqlite3.connect(self.db_file, detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date.timestamp())\n request = \"\"\"SELECT time FROM reddit_comments \n WHERE tag_id = {0} AND username_id = {2} {1} \n ORDER BY -time LIMIT 1\"\"\".format(tag_id, addition_command, user_id)\n c.execute(request)\n result = [r for r in c]\n conn.close()\n if result:\n return result[0]\n else:\n return None\n\n def get_average_tag_of_user(self, model_id, user_id, tag_id, to_date=None):\n count_coument = 0\n average_vector = None\n conn = sqlite3.connect(self.db_file, detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date.timestamp())\n request = '''\n SELECT \n '{1}_results'.*\n FROM \n reddit_comments \n LEFT JOIN \n '{1}_results' ON reddit_comments.comment_id = '{1}_results'.comment_id \n WHERE \n reddit_comments.username_id = {0} AND reddit_comments.tag_id = {3}{2}'''.format(\n user_id, model_id, addition_command, tag_id\n )\n c.execute(request)\n for coment_onehot in c:\n if coment_onehot[0] is None:\n continue\n count_coument += 1\n curent_onehot = numpy.array(coment_onehot[1:])\n if average_vector is not None:\n average_vector += curent_onehot\n else:\n average_vector = curent_onehot\n if count_coument:\n average_vector /= count_coument\n conn.close()\n return average_vector\n\n def get_average_topic(self, model_id, from_date, to_date=None):\n count_coument = 0\n average_vector = None\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date.timestamp())\n request = '''\n SELECT \n '{1}_results'.* \n FROM \n reddit_comments\n LEFT JOIN \n '{1}_results' ON reddit_comments.comment_id = '{1}_results'.comment_id \n WHERE \n reddit_comments.time >= '{0}'{2}'''.format(\n from_date.timestamp(), model_id, addition_command\n )\n c.execute(request)\n for coment_onehot in c:\n if coment_onehot[0] is None:\n continue\n 
count_coument += 1\n curent_onehot = numpy.array(coment_onehot[1:])\n if average_vector is not None:\n average_vector += curent_onehot\n else:\n average_vector = curent_onehot\n if count_coument > 0:\n average_vector /= count_coument\n # else:\n # average_vector = numpy.zeros(self.get_count_topic(model_id))\n conn.close()\n return average_vector\n\n def get_average_topic_by_user(self, model_id, user_id, from_date, to_date=None):\n count_coument = 0\n average_vector = None\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date.timestamp())\n request = \"SELECT '{1}_results'.* \" \\\n \"FROM reddit_comments LEFT JOIN '{1}_results' ON reddit_comments.comment_id = '{1}_results'.comment_id \" \\\n \"WHERE reddit_comments.time >= '{0}' AND reddit_comments.username_id = {3} {2};\".format(\n from_date.timestamp(), model_id, addition_command, user_id)\n c.execute(request)\n for coment_onehot in c:\n if coment_onehot[0] is None:\n continue\n count_coument += 1\n curent_onehot = numpy.array(coment_onehot[1:])\n if average_vector is not None:\n average_vector += curent_onehot\n else:\n average_vector = curent_onehot\n if count_coument > 0:\n average_vector /= count_coument\n # else:\n # average_vector = numpy.zeros(self.get_count_topic(model_id))\n conn.close()\n return average_vector\n\n def get_average_users_onehot(self, model_id, from_date, to_date=None):\n average_vector = None\n count_coument = 0\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date.timestamp())\n request = \"\"\"SELECT '{2}_output_onehot_username'.* \n FROM reddit_comments \n LEFT JOIN '{2}_output_onehot_username' ON reddit_comments.comment_id = '{2}_output_onehot_username'.comment_id \n WHERE reddit_comments.time >= '{0}'{1};\"\"\".format(from_date.timestamp(), addition_command, model_id)\n c.execute(request)\n for coment_onehot in c:\n if coment_onehot[0] is None:\n continue\n count_coument += 1\n curent_onehot = numpy.array(coment_onehot[1:])\n if average_vector is not None:\n average_vector += curent_onehot\n else:\n average_vector = curent_onehot\n if count_coument > 0:\n average_vector /= count_coument\n conn.close()\n return average_vector\n\n def get_average_tag_onehot(self, model_id, from_date, to_date=None):\n average_vector = None\n count_coument = 0\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n addition_command = ''\n if to_date:\n addition_command = \" AND time < '{0}'\".format(to_date.timestamp())\n request = \"\"\"SELECT '{2}_output_onehot_tag'.* \n FROM reddit_comments \n LEFT JOIN '{2}_output_onehot_tag' ON reddit_comments.comment_id = '{2}_output_onehot_tag'.comment_id \n WHERE reddit_comments.time >= '{0}'{1};\"\"\".format(\n from_date.timestamp(), addition_command, model_id)\n c.execute(request)\n for coment_onehot in c:\n if coment_onehot[0] is None:\n continue\n count_coument += 1\n curent_onehot = numpy.array(coment_onehot[1:])\n if average_vector is not None:\n average_vector += curent_onehot\n else:\n average_vector = curent_onehot\n if count_coument > 0:\n average_vector /= count_coument\n conn.close()\n return average_vector\n\n def get_ordered_table_by_name(self, name, order):\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n request = \"SELECT * FROM {} ORDER BY {}\".format(name, order)\n c.execute(request)\n result = [r for r in c]\n conn.close()\n return result\n\n def 
get_read_categories(self, user_id, time_state, time_next_state):\n request = \"\"\"SELECT DISTINCT tag_id \n FROM reddit_comments \n WHERE username_id = {2} AND time >= '{0}' AND time < '{1}'\"\"\".format(time_state, time_next_state, user_id)\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n c.execute(request)\n result = [r for r in c]\n conn.close()\n return result\n\n def get_tag_id(self, tag):\n request = \"\"\"SELECT tag_id FROM tags WHERE tag = '{0}' \"\"\".format(tag)\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n c.execute(request)\n result = [r for r in c]\n conn.close()\n return result[0][0]\n\n def get_count_topic(self, model_id):\n # request = \\\n # \"\"\"SELECT count(column_name)\n # FROM information_schema.columns\n # WHERE table_name='{0}_results'\"\"\".format(model_id)\n # conn = sqlite3.connect(self.db_file)\n # c = conn.cursor()\n # c.execute(request)\n # result = [r for r in c]\n # conn.close()\n # count_columns = result[0]\n # count_topic = count_columns - 1\n request = \"PRAGMA table_info('{0}_results')\".format(model_id)\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n c.execute(request)\n count_columns = len(c.fetchall())\n count_topic = count_columns - 1\n return count_topic\n\n def get_count_used_tags(self, model_id):\n # request = \\\n # \"\"\"SELECT count(column_name)\n # FROM information_schema.columns\n # WHERE table_name='{0}_output_onehot_tag'\"\"\".format(model_id)\n # conn = sqlite3.connect(self.db_file)\n # c = conn.cursor()\n # c.execute(request)\n # result = [r for r in c]\n # conn.close()\n # count_columns = result[0]\n # count_tags = count_columns - 1\n request = \"PRAGMA table_info('{0}_output_onehot_tag')\".format(model_id)\n conn = sqlite3.connect(self.db_file)\n c = conn.cursor()\n c.execute(request)\n count_columns = len(c.fetchall())\n count_tags = count_columns - 1\n return count_tags\n\n def get_coment(self, coment_id):\n request = \"\"\"SELECT comment_id, time, username_id, tag_id \n FROM reddit_comments \n WHERE comment_id = {0}\"\"\".format(coment_id)\n conn = sqlite3.connect(self.db_file, detect_types=sqlite3.PARSE_DECLTYPES)\n c = conn.cursor()\n c.execute(request)\n result = c.fetchall()\n conn.close()\n if len(result) == 0:\n return None\n else:\n return {\n 'comment_id': result[0][0],\n 'time': result[0][1],\n 'username_id': result[0][2],\n 'tag_id': result[0][3],\n }\n\n\nclass Corpus_db:\n\n\n def __init__(self, parent):\n self._current = -1\n self.parent = parent\n\n\n def __getitem__(self, key):\n conn = sqlite3.connect(self.parent.db_file)\n c = conn.cursor()\n\n if self.parent.occ_threshold == 0:\n c.execute(\"SELECT word_id, occurrences FROM occurrences WHERE comment_id=\" + str(key + 1))\n else:\n c.execute(\"SELECT word_id_index_\" + str(self.parent.occ_threshold) + \".word_id_out, occurrences FROM occurrences \"\n + \"INNER JOIN word_id_index_\" + str(self.parent.occ_threshold) +\n \" ON occurrences.word_id = word_id_index_\" + str(self.parent.occ_threshold) + \".word_id\"\n + \" WHERE comment_id=\" + str(key+1))\n corpus = []\n for row in c:\n corpus.append((row[0]-1, row[1]))\n\n conn.close()\n if corpus:\n return corpus\n else:\n raise IndexError(\"index not found in db\")\n\n # def __next__(self):\n # self._current += 1\n # try:\n # return self[self._current]\n # except IndexError:\n # # self._current = -1 # TODO test and check whether such implementation is valid\n # raise StopIteration\n\n def __iter__(self):\n return Corpus_db_iter(self.parent, self)\n\n # self._current = -1 # TODO 
test and check whether such implementation is valid\n # return self\n\n def __len__(self):\n conn = sqlite3.connect(self.parent.db_file)\n c = conn.cursor()\n\n if self.parent.occ_threshold == 0:\n c.execute(\"SELECT count(*) FROM reddit_comments\")\n else:\n c.execute(\"SELECT count(*) FROM (\"\n + \"SELECT DISTINCT occurrences.comment_id \"\n + \"FROM occurrences INNER JOIN word_id_index_\" + str(self.parent.occ_threshold)\n + \" ON occurrences.word_id = word_id_index_\" + str(self.parent.occ_threshold) + \".word_id)\")\n # count of documents that contain at least one word listed in word_id_index\n\n result = [row[0] for row in c]\n conn.close()\n\n if result:\n return result[0]\n else:\n return 0\n\n\nclass Corpus_db_iter:\n\n def __init__(self, parent_dict, parent_corpus):\n self.parent = parent_dict\n self.corpus = parent_corpus\n self._current = -1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n self._current += 1\n try:\n return self.corpus[self._current]\n except IndexError:\n # self._current = -1 # TODO test and check whether such implementation is valid\n raise StopIteration\n\n\n\n","sub_path":"V1/dictionary_db.py","file_name":"dictionary_db.py","file_ext":"py","file_size_in_byte":43700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"125962352","text":"import logging\n\nimport pytest\n\nfrom ocs_ci.ocs.exceptions import CommandFailed\nfrom ocs_ci.framework.testlib import (\n ignore_leftovers,\n E2ETest,\n tier3,\n skipif_openshift_dedicated,\n skipif_ocs_version,\n skipif_external_mode,\n)\nfrom ocs_ci.helpers.sanity_helpers import Sanity\nfrom ocs_ci.helpers.helpers import (\n modify_statefulset_replica_count,\n validate_pv_delete,\n wait_for_resource_state,\n)\nfrom ocs_ci.ocs import constants, defaults\nfrom ocs_ci.ocs.ocp import OCP\nfrom ocs_ci.ocs.resources.pod import get_noobaa_pods, wait_for_storage_pods\nfrom ocs_ci.ocs.resources.pvc import get_pvc_objs, delete_pvcs, create_restore_pvc\nfrom ocs_ci.ocs.resources.ocs import OCS\n\nlog = logging.getLogger(__name__)\n\n\n@tier3\n@ignore_leftovers\n@pytest.mark.polarion_id(\"OCS-2605\")\n@pytest.mark.bugzilla(\"1924047\")\n@skipif_ocs_version(\"<4.6\")\n@skipif_openshift_dedicated\n@skipif_external_mode\nclass TestNoobaaBackupAndRecovery(E2ETest):\n \"\"\"\n Test to verify noobaa backup and recovery\n\n \"\"\"\n\n @pytest.fixture(autouse=True)\n def init_sanity(self):\n \"\"\"\n Initialize Sanity instance\n\n \"\"\"\n self.sanity_helpers = Sanity()\n\n @pytest.fixture(autouse=True)\n def teardown_fixture(self, request):\n def finalizer():\n # Get the statefulset replica count\n sst_obj = OCP(\n kind=constants.STATEFULSET,\n namespace=constants.OPENSHIFT_STORAGE_NAMESPACE,\n )\n noobaa_db_sst_obj = sst_obj.get(resource_name=self.noobaa_db_sst_name)\n if noobaa_db_sst_obj[\"spec\"][\"replicas\"] != 1:\n assert modify_statefulset_replica_count(\n statefulset_name=self.noobaa_db_sst_name, replica_count=1\n ), f\"Failed to scale up the statefulset {self.noobaa_db_sst_name}\"\n\n try:\n self.restore_pvc_obj.delete()\n except CommandFailed as ex:\n if f'\"{self.restore_pvc_obj.name}\" not found' not in str(ex):\n raise ex\n\n request.addfinalizer(finalizer)\n\n def test_noobaa_db_backup_and_recovery(\n self,\n pvc_factory,\n pod_factory,\n snapshot_factory,\n bucket_factory,\n rgw_bucket_factory,\n ):\n \"\"\"\n Test case to verify noobaa backup and recovery\n\n 1. Take snapshot db-noobaa-db-0 PVC and restore it to PVC\n 2. Scale down the statefulset noobaa-db\n 3. 
 def test_noobaa_db_backup_and_recovery(\n self,\n pvc_factory,\n pod_factory,\n snapshot_factory,\n bucket_factory,\n rgw_bucket_factory,\n ):\n \"\"\"\n Test case to verify noobaa backup and recovery\n\n 1. Take snapshot db-noobaa-db-0 PVC and restore it to PVC\n 2. Scale down the statefulset noobaa-db\n 3. Get the yaml of the current PVC, db-noobaa-db-0 and\n change the parameter persistentVolumeReclaimPolicy to Retain for restored PVC\n 4. Delete both PVCs, the PV for the original claim db-noobaa-db-0 will be removed.\n The PV for claim db-noobaa-db-0-snapshot-restore will move to 'Released'\n 5. Edit again restore PV and remove the claimRef section.\n The volume will transition to Available.\n 6. Edit the yaml db-noobaa-db-0.yaml and change the setting volumeName to restored PVC.\n 7. Scale up the stateful set again and the pod should be running\n\n \"\"\"\n\n # Initialise variable\n self.noobaa_db_sst_name = \"noobaa-db-pg\"\n\n # Get noobaa pods before execution\n noobaa_pods = get_noobaa_pods()\n\n # Get noobaa PVC before execution\n noobaa_pvc_obj = get_pvc_objs(pvc_names=[\"db-noobaa-db-pg-0\"])\n noobaa_pv_name = noobaa_pvc_obj[0].get(\"spec\").get(\"spec\").get(\"volumeName\")\n\n # Take snapshot db-noobaa-db-0 PVC\n log.info(f\"Creating snapshot of the {noobaa_pvc_obj[0].name} PVC\")\n snap_obj = snapshot_factory(\n pvc_obj=noobaa_pvc_obj[0],\n wait=True,\n snapshot_name=f\"{noobaa_pvc_obj[0].name}-snapshot\",\n )\n log.info(f\"Successfully created snapshot {snap_obj.name} and in Ready state\")\n\n # Restore it to PVC\n log.info(f\"Restoring snapshot {snap_obj.name} to create new PVC\")\n sc_name = noobaa_pvc_obj[0].get().get(\"spec\").get(\"storageClassName\")\n pvc_size = (\n noobaa_pvc_obj[0]\n .get()\n .get(\"spec\")\n .get(\"resources\")\n .get(\"requests\")\n .get(\"storage\")\n )\n self.restore_pvc_obj = create_restore_pvc(\n sc_name=sc_name,\n snap_name=snap_obj.name,\n namespace=snap_obj.namespace,\n size=pvc_size,\n pvc_name=f\"{snap_obj.name}-restore\",\n volume_mode=snap_obj.parent_volume_mode,\n access_mode=snap_obj.parent_access_mode,\n )\n wait_for_resource_state(self.restore_pvc_obj, constants.STATUS_BOUND)\n self.restore_pvc_obj.reload()\n log.info(\n f\"Successfully created PVC {self.restore_pvc_obj.name} \"\n f\"from snapshot {snap_obj.name}\"\n )\n\n # Scale down the statefulset noobaa-db\n assert modify_statefulset_replica_count(\n statefulset_name=self.noobaa_db_sst_name, replica_count=0\n ), f\"Failed to scale down the statefulset {self.noobaa_db_sst_name}\"\n\n # Get the noobaa-db PVC\n pvc_obj = OCP(\n kind=constants.PVC, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE\n )\n noobaa_pvc_yaml = pvc_obj.get(resource_name=noobaa_pvc_obj[0].name)\n\n # Get the restored noobaa PVC and\n # change the parameter persistentVolumeReclaimPolicy to Retain\n restored_noobaa_pvc_obj = get_pvc_objs(pvc_names=[f\"{snap_obj.name}-restore\"])\n restored_noobaa_pv_name = (\n restored_noobaa_pvc_obj[0].get(\"spec\").get(\"spec\").get(\"volumeName\")\n )\n pv_obj = OCP(kind=constants.PV, namespace=constants.OPENSHIFT_STORAGE_NAMESPACE)\n params = '{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Retain\"}}'\n assert pv_obj.patch(resource_name=restored_noobaa_pv_name, params=params), (\n \"Failed to change the parameter persistentVolumeReclaimPolicy\"\n f\" to Retain {restored_noobaa_pv_name}\"\n )\n\n # Delete both PVCs\n delete_pvcs(pvc_objs=[noobaa_pvc_obj[0], restored_noobaa_pvc_obj[0]])\n\n # Validate original claim db-noobaa-db-0 removed\n assert validate_pv_delete(\n pv_name=noobaa_pv_name\n ), f\"PV not deleted, still exist {noobaa_pv_name}\"\n\n # Validate PV for claim db-noobaa-db-0-snapshot-restore is in Released state\n pv_obj.wait_for_resource(\n condition=constants.STATUS_RELEASED, resource_name=restored_noobaa_pv_name\n )\n\n # Edit again restore PV and remove the claimRef section\n
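 # Equivalent CLI sketch, assuming the oc client (the PV name is illustrative):\n # oc patch pv <pv-name> --type=json -p '[{\"op\": \"remove\", \"path\": \"/spec/claimRef\"}]'\n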
 log.info(f\"Remove the claimRef section from PV {restored_noobaa_pv_name}\")\n params = '[{\"op\": \"remove\", \"path\": \"/spec/claimRef\"}]'\n pv_obj.patch(\n resource_name=restored_noobaa_pv_name, params=params, format_type=\"json\"\n )\n log.info(\n f\"Successfully removed claimRef section from PV {restored_noobaa_pv_name}\"\n )\n\n # Validate PV is in Available state\n pv_obj.wait_for_resource(\n condition=constants.STATUS_AVAILABLE, resource_name=restored_noobaa_pv_name\n )\n\n # Edit the yaml db-noobaa-db-0.yaml and change the\n # setting volumeName to restored PVC\n noobaa_pvc_yaml[\"spec\"][\"volumeName\"] = restored_noobaa_pv_name\n noobaa_pvc_yaml = OCS(**noobaa_pvc_yaml)\n noobaa_pvc_yaml.create()\n\n # Validate noobaa PVC is in bound state\n pvc_obj.wait_for_resource(\n condition=constants.STATUS_BOUND,\n resource_name=noobaa_pvc_obj[0].name,\n timeout=120,\n )\n\n # Scale up the statefulset again\n assert modify_statefulset_replica_count(\n statefulset_name=self.noobaa_db_sst_name, replica_count=1\n ), f\"Failed to scale up the statefulset {self.noobaa_db_sst_name}\"\n\n # Validate noobaa pod is up and running\n pod_obj = OCP(kind=constants.POD, namespace=defaults.ROOK_CLUSTER_NAMESPACE)\n pod_obj.wait_for_resource(\n condition=constants.STATUS_RUNNING,\n resource_count=len(noobaa_pods),\n selector=constants.NOOBAA_APP_LABEL,\n )\n\n # Change the parameter persistentVolumeReclaimPolicy to Delete again\n params = '{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Delete\"}}'\n assert pv_obj.patch(resource_name=restored_noobaa_pv_name, params=params), (\n \"Failed to change the parameter persistentVolumeReclaimPolicy\"\n f\" to Delete {restored_noobaa_pv_name}\"\n )\n log.info(\"Changed the parameter persistentVolumeReclaimPolicy to Delete again\")\n\n # Verify all storage pods are running\n wait_for_storage_pods()\n\n # Creating Resources\n log.info(\"Creating Resources using sanity helpers\")\n self.sanity_helpers.create_resources(\n pvc_factory, pod_factory, bucket_factory, rgw_bucket_factory\n )\n # Deleting Resources\n self.sanity_helpers.delete_resources()\n\n # Verify everything running fine\n log.info(\"Verifying All resources are Running and matches expected result\")\n self.sanity_helpers.health_check(tries=120)\n","sub_path":"tests/e2e/kcs/test_noobaa_db_backup_and_recovery.py","file_name":"test_noobaa_db_backup_and_recovery.py","file_ext":"py","file_size_in_byte":9148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"365160269","text":"import matplotlib.pyplot as plt\nfrom scipy.misc import imread\n\ndef download_image(url):\n filename = url[url.rindex('/')+1:]\n try:\n with open(filename, 'rb') as fp:\n return imread(fp) / 255\n except FileNotFoundError:\n import urllib.request\n with open(filename, 'w+b') as fp, urllib.request.urlopen(url) as r:\n fp.write(r.read())\n fp.seek(0) # rewind: the file position is at EOF right after writing\n return imread(fp) / 255\n\nimg_facade = download_image('https://users-cs.au.dk/rav/ml/handins/h4/nygaard_facade.jpg')\nimg_stairs = download_image('https://users-cs.au.dk/rav/ml/handins/h4/nygaard_stairs.jpg')\nprint(img_facade)\n\nplt.imshow(img_facade)\nplt.show()\n\n\n\nplt.imshow(img_stairs)\nplt.show()","sub_path":"Python/src/Handin4/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"636472675","text":"# Time: O(n)\n# Space: O(h), h is the depth of the recursion\n\n# 394\n# Given an encoded string, return its 
decoded string.\n#\n# The encoding rule is: k[encoded_string],\n# where the encoded_string inside the square brackets is\n# being repeated exactly k times. Note that k is guaranteed\n# to be a positive integer.\n#\n# You may assume that the input string is always valid;\n# No extra white spaces, square brackets are well-formed, etc.\n#\n# Furthermore, you may assume that the original data does not\n# contain any digits and that digits are only for those repeat numbers, k.\n# For example, there won't be input like 3a or 2[4].\n#\n# Examples:\n#\n# s = \"3[a]2[bc]\", return \"aaabcbc\".\n# s = \"3[a2[c]]\", return \"accaccacc\".\n# s = \"2[abc]3[cd]ef\", return \"abcabccdcdcdef\".\n\nclass Solution(object):\n def decodeString(self, s): # USE THIS\n stack, curNum, curString = [], 0, ''\n for c in s:\n if c == '[':\n stack.append(curString)\n stack.append(curNum)\n curString, curNum = '', 0\n elif c == ']':\n num = stack.pop()\n prevString = stack.pop()\n curString = prevString + num * curString\n elif c.isdigit():\n curNum = curNum*10 + int(c)\n else:\n curString += c\n return curString\n\n def decodeString_stack2(self, s):\n stack = [[\"\", 1]] # [subs, repeatNum]\n num = 0\n for ch in s:\n if ch.isdigit():\n num = num * 10 + ord(ch) - ord(\"0\")\n elif ch == '[':\n stack.append([\"\", num])\n num = 0\n elif ch == ']':\n subs, k = stack.pop()\n stack[-1][0] += subs * k\n else:\n stack[-1][0] += ch\n return stack[0][0]\n\n def decodeString_recur(self, s): # recursion: although some repeat scan, still O(n)\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n ans, i, j = '', 0, len(s)\n while i < j:\n if s[i].isalpha():\n ans += s[i]\n i += 1\n elif s[i].isdigit():\n n = 0\n while s[i].isdigit():\n n = 10 * n + int(s[i])\n i += 1\n brkt2 = i\n score = 1\n while score != 0: # repeat scan, the substring will be scanned again in recursion\n brkt2 += 1\n if s[brkt2] == '[':\n score += 1\n elif s[brkt2] == ']':\n score -= 1\n ans += self.decodeString(s[i + 1:brkt2]) * n\n i = brkt2 + 1\n\n return ans\n\n def decodeString_recur2(self, s): # quite some KENG (condition check)\n def foo(s, i):\n ans = ''\n while i < len(s) and s[i] != ']':\n if s[i].isalpha():\n ans += s[i]\n i += 1\n elif s[i].isdigit():\n n = 0\n while s[i].isdigit():\n n = 10*n + int(s[i])\n i += 1\n i += 1 # pass '['\n subs, prevEnd = foo(s, i)\n ans += subs * n\n i = prevEnd + 1 # pass ']'\n return ans, i\n\n return foo(s, 0)[0]\n\n\n # Time: O(n)\n # Space: O(n)\n def decodeString_stack3(self, s): # hard to understand: curr[] append to strs[]\n curr, nums, strs = [], [], []\n n = 0\n\n for c in s:\n if c.isdigit():\n n = n * 10 + ord(c) - ord('0')\n elif c.isalpha():\n curr.append(c)\n elif c == '[':\n nums.append(n)\n strs.append(curr)\n n, curr = 0, []\n elif c == ']':\n strs[-1].extend(curr * nums.pop())\n curr = strs.pop()\n return \"\".join(curr)\n\nprint(Solution().decodeString(\"3[a]2[bc]\")) # \"aaabcbc\"\nprint(Solution().decodeString(\"3[a2[c]]\")) # \"accaccacc\"\nprint(Solution().decodeString(\"2[abc]3[cd]ef\")) # \"abcabccdcdcdef\"\n","sub_path":"Python/decode-string.py","file_name":"decode-string.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90030841","text":"'''\nCreated on January 20, 2015\n\n@author: Robert PASTOR\n\ndemonstrate that Vertical Phase changes according to aircraft CAS speed\n\n'''\nimport time\n\nfrom Home.Environment.Atmosphere import Atmosphere\nfrom Home.Environment.Earth import Earth\n\nfrom 
Home.BadaAircraftPerformance.BadaAircraftDatabaseFile import BadaAircraftDatabase\nfrom Home.BadaAircraftPerformance.BadaAircraftFile import BadaAircraft\nfrom Home.Environment.AirportDatabaseFile import AirportsDatabase\nfrom Home.Environment.RunWaysDatabaseFile import RunWayDataBase\n\nfrom Home.Guidance.GroundRunLegFile import GroundRunLeg\nfrom Home.Guidance.DescentGlideSlopeFile import DescentGlideSlope\n\nFeet2Meter = 0.3048 # 1 feet = 0.3048 meter\nMeter2Feet = 3.2808399 \n\nif __name__ == '__main__':\n\n print ( '=========== main start ==================' )\n aircraftIcaoCode = 'B744'\n \n atmosphere = Atmosphere()\n assert (not(atmosphere is None))\n \n earth = Earth()\n assert (not(earth is None))\n\n acBd = BadaAircraftDatabase()\n assert acBd.read()\n \n print ( '==================== load airports ==================== '+ time.strftime(\"%c\") )\n airportsDB = AirportsDatabase()\n assert airportsDB.read()\n \n departureAirport = airportsDB.getAirportFromICAOCode('LFPG')\n assert not(departureAirport is None)\n print ( departureAirport )\n \n arrivalAirportIcaoCode = 'LFML'\n arrivalAirport = airportsDB.getAirportFromICAOCode(arrivalAirportIcaoCode)\n assert not(arrivalAirport is None)\n print ( arrivalAirport )\n \n runwaysDb = RunWayDataBase()\n assert runwaysDb.read()\n \n arrivalRunway = runwaysDb.getFilteredRunWays(arrivalAirportIcaoCode, 'Landing')\n \n if ( acBd.aircraftExists(aircraftIcaoCode) \n and acBd.aircraftPerformanceFileExists(acBd.getAircraftPerformanceFile(aircraftIcaoCode))):\n aircraft = BadaAircraft(aircraftIcaoCode, \n acBd.getAircraftPerformanceFile(aircraftIcaoCode),\n atmosphere,\n earth)\n aircraft.dump()\n assert not(aircraft is None)\n\n elapsedTimeSeconds = 0.0\n deltaTimeSeconds = 1.0\n \n aircraft.initStateVector(elapsedTimeSeconds = 0.0, \n trueAirSpeedMetersSecond = 0.0, \n airportFieldElevationAboveSeaLevelMeters = 0.0)\n \n aircraft.setTargetCruiseFlightLevel(RequestedFlightLevel = 310.0 , \n departureAirportAltitudeMSLmeters = 0.0)\n aircraft.setAircraftMassKilograms(aircraftMassKilograms = 285700.0)\n aircraft.setTargetCruiseMach(cruiseMachNumber = 0.8)\n \n tas = aircraft.getCurrentTrueAirSpeedMetersSecond()\n previousAltitudeMSLmeters = 0.0\n t0 = time.clock()\n print ( 'simulation start= {0} seconds'.format(t0) )\n \n print ( '=========== simulation start ==================' )\n endOfSimulation = False\n currentPosition = departureAirport\n \n arrivalGroundRun = GroundRunLeg( runway = arrivalRunway,\n aircraft = aircraft,\n airport = arrivalAirport )\n touchDownWayPoint = arrivalGroundRun.computeTouchDownWayPoint()\n print ( touchDownWayPoint )\n\n print ( '===================== final 3 degrees descending glide slope ================' )\n descentGlideSlope = DescentGlideSlope( runway = arrivalRunway,\n aircraft = aircraft,\n arrivalAirport = arrivalAirport,\n descentGlideSlopeDegrees = 3.0)\n ''' if there is a fix nearer to 5 nautics of the touch-down then limit size of simulated glide slope '''\n descentGlideSlopeSizeNautics = 5.0\n\n descentGlideSlope.buildSimulatedGlideSlope(descentGlideSlopeSizeNautics)\n firstGlideSlopeWayPoint = descentGlideSlope.getVertex(v=0).getWeight()\n aircraft.setTargetApproachWayPoint(firstGlideSlopeWayPoint)\n aircraft.setArrivalRunwayTouchDownWayPoint(touchDownWayPoint)\n \n distanceStillToFlyMeters = departureAirport.getDistanceMetersTo(arrivalAirport)\n print ( 'distance still To Fly Meters= {0:.2f} meters'.format(distanceStillToFlyMeters) )\n \n index = 0\n while (endOfSimulation == False and 
not(aircraft.isLanding())):\n endOfSimulation, deltaDistanceMeters, altitudeMeters = aircraft.fly(elapsedTimeSeconds, \n deltaTimeSeconds, \n distanceStillToFlyMeters,\n currentPosition)\n distanceStillToFlyMeters = distanceStillToFlyMeters - deltaDistanceMeters\n RateOfClimbDescentFeetMinute = ((altitudeMeters - previousAltitudeMSLmeters) * Meter2Feet) / (deltaTimeSeconds / 60.0) \n previousAltitudeMSLmeters = altitudeMeters\n #if math.fmod(elapsedTimeSecond)\n #print 'rate of climb / descent= {0} feet/minute'.format(RateOfClimbDescentFeetMinute)\n #print 'altitude= {0} feet'.format(altitudeMeters * Meter2Feet)\n elapsedTimeSeconds += deltaTimeSeconds\n bearingDegrees = currentPosition.getBearingDegreesTo(firstGlideSlopeWayPoint)\n newWayPoint = currentPosition.getWayPointAtDistanceBearing(Name='pt-{0}'.format(index), \n DistanceMeters= deltaDistanceMeters, \n BearingDegrees = bearingDegrees)\n newWayPoint.setAltitudeAboveSeaLevelMeters(altitudeMeters)\n distanceStillToFlyMeters = newWayPoint.getDistanceMetersTo(arrivalAirport)\n currentPosition = newWayPoint\n index = index + 1\n \n print ( '=========== create State Vector output file ==================' )\n print ( 'simulation end - duration= {0} seconds'.format(time.clock()-t0) )\n aircraft.createStateVectorOutputFile()\n print ( '=========== simulation end ==================' )\n","sub_path":"Home/Tests/xTest_001_Vertical_Phase_Speed_B747.py","file_name":"xTest_001_Vertical_Phase_Speed_B747.py","file_ext":"py","file_size_in_byte":6082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"513566104","text":"# Author: Grigoriy Kraynov\n# Public domain\n\n\ndef gcd(a, b):\n if a == 0:\n return b, 0, 1\n else:\n g, y, x = gcd(b % a, a)\n return g, x - (b // a) * y, y\n\n\ndef modulo_inverse(a, modulo):\n cd, x, y = gcd(a, modulo)\n if cd != 1:\n # No modulo inverse\n return None\n else:\n return x % modulo\n\n\ndef forge_signature(goal_msg, pubkey):\n with Oracle() as oracle:\n # sign = oracle.sign(m_as_int)\n # valid = oracle.verify(goal, sign)\n pass\n\n if valid:\n return hex(sign)[2:].upper()\n else:\n return None\n\n\ndef main():\n goal = 'Crypto is hard --- even schemes that look complex can be broken'\n key = 0\n sign = forge_signature(goal, key)\n print(sign)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"RsaOracle/stub.py","file_name":"stub.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"224412066","text":"\r\nfrom googletrans import Translator\r\ntranslator = Translator()\r\nf1=open('friends.srt',\"r\",encoding=\"utf-8\")\r\nf2=open('hindi2.srt',\"a\",encoding=\"utf-8\")\r\n\r\nfor line in f1:\r\n tr = translator.translate(line, dest='hi')\r\n print(tr.text)\r\n f2.write(tr.text+\"\\n\")\r\n\r\nf1.close()\r\nf2.close()\r\n\r\nimport pysrt\r\nsubs = pysrt.open('hindi.srt')\r\n\r\nfrom pydub import AudioSegment\r\nfrom pydub.playback import play\r\nfrom gtts import gTTS \r\nimport os\r\n\r\n\r\ndef speed_swifter(sound, speed):\r\n return sound._spawn(sound.raw_data, overrides={\"frame_rate\": int(sound.frame_rate * speed)})\r\n\r\nstartmilli=0\r\ns = AudioSegment.silent(duration=0)\r\nfor sub in subs:\r\n diff = (sub.start.hours*3600*1000+sub.start.minutes*60*1000+sub.start.seconds*1000+sub.start.milliseconds)-startmilli\r\n print(diff)\r\n s=s+AudioSegment.silent(duration=diff)\r\n myobj = gTTS(text=sub.text, lang='hi', slow=False)\r\n myobj.save(\"voice.mp3\")\r\n vc = 
AudioSegment.from_mp3('voice.mp3')\r\n got_len = len(vc)\r\n ori_len = (sub.end.hours*3600*1000+sub.end.minutes*60*1000+sub.end.seconds*1000+sub.end.milliseconds) - (sub.start.hours*3600*1000+sub.start.minutes*60*1000+sub.start.seconds*1000+sub.start.milliseconds)\r\n speed = float(got_len/ori_len)\r\n sound_with_altered_frame_rate = speed_swifter(vc,speed)\r\n s=s+sound_with_altered_frame_rate\r\n startmilli = (sub.end.hours*3600*1000+sub.end.minutes*60*1000+sub.end.seconds*1000+sub.end.milliseconds)\r\n\r\ns.export(\"final_output.mp3\",format=\"mp3\")\r\nos.system('del voice.mp3')","sub_path":"translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"272061085","text":"import re\nimport numpy as np\n\ndef getFilteredDataset(filename) :\n lines = [line.rstrip('\\n') for line in open(filename)]\n fileArr = []\n threadNames = []\n for i in range(len(lines)):\n line = lines[i]\n dataCache = []\n matchObj = re.match( r'.*\\d(.*?): threadName=\"(.*?)\" loggerName=\"c.a.i.b.i.t.s.(.*?)\"(.*?) msg=\"(.*?)\"', line, re.M|re.I)\n if matchObj:\n #Status\n dataCache.append(matchObj.group(1).strip())\n #ThreadName\n dataCache.append(matchObj.group(2).strip())\n #LoggerName\n dataCache.append(matchObj.group(3).strip())\n #LoggerMsg\n dataCache.append(matchObj.group(5).strip())\n else:\n print(\"No match!!!!! \" , line)\n \n # arr = np.append(arr, dataCache)\n fileArr.append(dataCache)\n #break\n\n print(len(fileArr))\n return fileArr\n\n# Execution\nprint(getFilteredDataset('ServerLog.log'))\n\n\n# str = '2017-01-19T00:07:54 INFO : threadName=\"TMCRMExec-4\" loggerName=\"c.a.i.b.i.t.s.ProcessStoredQueueDataService\" txnId=\"\" msg=\"ACCOUNT_ID=6feb5d33-b655-48f9-943c-1e1d423ee81e MSG=Processing AT message for accountId\"'\n# matchObj = re.match( r'.*\\d(.*?): threadName=\"(.*?)\" loggerName=\"c.a.i.b.i.t.s.(.*?)\"(.*?) 
msg=\"(.*?)\"', str, re.M|re.I)\n# if matchObj:\n# print(\"matchObj.group(0) : \", matchObj.group(1))\n# print(\"matchObj.group(1) : \", matchObj.group(2))\n# print(\"matchObj.group(2) : \", matchObj.group(3))\n# print(\"matchObj.group(3) : \", matchObj.group(4))\n# print(\"matchObj.group(4) : \", matchObj.group(5))\n# else:\n# print(\"No match!!\")\n","sub_path":"src/ServerLogNotification.py","file_name":"ServerLogNotification.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266162973","text":"from bs4 import BeautifulSoup\nimport requests\nimport json\nimport pprint\n\ndef Ecommerce_site_data():\n url=\"https://webscraper.io/test-sites\"\n api=requests.get(url)\n soup=BeautifulSoup(api.text,\"html.parser\")\n main_div=soup.find(\"div\",class_=\"container test-sites\")\n div=main_div.find_all(\"div\",class_=\"col-md-7 pull-right\")\n \n list=[]\n position=0\n for i in div:\n position+=1\n title=i.find(\"h2\",class_=\"site-heading\").a.get_text().strip()\n num=title\n\n url=i.find(\"h2\",class_=\"site-heading\").a[\"href\"]\n link=\"https://webscraper.io/\"+url\n \n dict={\"Position\":position, \"Site Name\":num, \"url\":link}\n list.append(dict)\n\n with open(\"E-Comarce_data.json\",\"w\") as k:\n json.dump(list,k,indent=5)\n \n return list\nEcommerce_site_data()","sub_path":"E-commerce.py","file_name":"E-commerce.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"246605781","text":"from tools.load import LoadMatrix\nfrom numpy import random\nlm=LoadMatrix()\n\nN = 100\n\nrandom.seed(17)\nground_truth = random.randn(N)\npredicted = random.randn(N)\n\nparameter_list = [[ground_truth,predicted]]\n\ndef evaluation_meansquarederror_modular(ground_truth, predicted):\n\tfrom shogun.Features import RegressionLabels\n\tfrom shogun.Evaluation import MeanSquaredError\n\n\tground_truth_labels = RegressionLabels(ground_truth)\n\tpredicted_labels = RegressionLabels(predicted)\n\t\n\tevaluator = MeanSquaredError()\n\tmse = evaluator.evaluate(predicted_labels,ground_truth_labels)\n\n\treturn mse\n\n\nif __name__=='__main__':\n\tprint('MeanSquaredError')\n\tevaluation_meansquarederror_modular(*parameter_list[0])\n\n","sub_path":"build/shogun_lib/examples/undocumented/python_modular/evaluation_meansquarederror_modular.py","file_name":"evaluation_meansquarederror_modular.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"465193225","text":"#! 
/usr/bin/env python\n#\n# written by Sylvain Rouquette, 2011\n\"\"\"\nInstall pep8 module:\n$ easy_install pep8\n\tor\n$ pip install pep8\n\nTo add the pep8 tool to the waf file:\n$ ./waf-light --tools=compat15,pep8\n\tor, if you have waf >= 1.6.2\n$ ./waf update --files=pep8\n\n\nThen add this to your wscript:\n\n[at]extension('.py', 'wscript')\ndef run_pep8(self, node):\n\tself.create_task('Pep8', node)\n\n\"\"\"\nimport threading\n\nfrom waflib import Options\nfrom waflib import Task\n\npep8 = __import__(\"pep8\")\n\n\nclass Pep8(Task.Task):\n color = \"PINK\"\n lock = threading.Lock()\n\n def check_options(self):\n if pep8.options:\n return\n pep8.options = Options.options\n pep8.options.prog = \"pep8\"\n excl = pep8.options.exclude.split(\",\")\n pep8.options.exclude = [s.rstrip(\"/\") for s in excl]\n if pep8.options.filename:\n pep8.options.filename = pep8.options.filename.split(\",\")\n if pep8.options.select:\n pep8.options.select = pep8.options.select.split(\",\")\n else:\n pep8.options.select = []\n if pep8.options.ignore:\n pep8.options.ignore = pep8.options.ignore.split(\",\")\n elif pep8.options.select:\n # Ignore all checks which are not explicitly selected\n pep8.options.ignore = [\"\"]\n elif pep8.options.testsuite or pep8.options.doctest:\n # For doctest and testsuite, all checks are required\n pep8.options.ignore = []\n else:\n # The default choice: ignore controversial checks\n pep8.options.ignore = pep8.DEFAULT_IGNORE.split(\",\")\n pep8.options.physical_checks = pep8.find_checks(\"physical_line\")\n pep8.options.logical_checks = pep8.find_checks(\"logical_line\")\n pep8.options.counters = dict.fromkeys(pep8.BENCHMARK_KEYS, 0)\n pep8.options.messages = {}\n\n def run(self):\n with Pep8.lock:\n self.check_options()\n pep8.input_file(self.inputs[0].abspath())\n return 0 if not pep8.get_count() else -1\n\n\ndef options(opt):\n opt.add_option(\n \"-q\",\n \"--quiet\",\n default=0,\n action=\"count\",\n help=\"report only file names, or nothing with -qq\",\n )\n opt.add_option(\n \"-r\",\n \"--repeat\",\n action=\"store_true\",\n help=\"show all occurrences of the same error\",\n )\n opt.add_option(\n \"--exclude\",\n metavar=\"patterns\",\n default=pep8.DEFAULT_EXCLUDE,\n help=\"exclude files or directories which match these \"\n \"comma separated patterns (default: %s)\" % pep8.DEFAULT_EXCLUDE,\n dest=\"exclude\",\n )\n opt.add_option(\n \"--filename\",\n metavar=\"patterns\",\n default=\"*.py\",\n help=\"when parsing directories, only check filenames \"\n \"matching these comma separated patterns (default: \"\n \"*.py)\",\n )\n opt.add_option(\n \"--select\",\n metavar=\"errors\",\n default=\"\",\n help=\"select errors and warnings (e.g. E,W6)\",\n )\n opt.add_option(\n \"--ignore\",\n metavar=\"errors\",\n default=\"\",\n help=\"skip errors and warnings (e.g. 
E4,W)\",\n )\n opt.add_option(\n \"--show-source\", action=\"store_true\", help=\"show source code for each error\"\n )\n opt.add_option(\n \"--show-pep8\", action=\"store_true\", help=\"show text of PEP 8 for each error\"\n )\n opt.add_option(\n \"--statistics\", action=\"store_true\", help=\"count errors and warnings\"\n )\n opt.add_option(\n \"--count\",\n action=\"store_true\",\n help=\"print total number of errors and warnings \"\n \"to standard error and set exit code to 1 if \"\n \"total is not null\",\n )\n opt.add_option(\"--benchmark\", action=\"store_true\", help=\"measure processing speed\")\n opt.add_option(\"--testsuite\", metavar=\"dir\", help=\"run regression tests from dir\")\n opt.add_option(\"--doctest\", action=\"store_true\", help=\"run doctest on myself\")\n","sub_path":"docs/.mywaflib/waflib/extras/pep8.py","file_name":"pep8.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"479229695","text":"from functools import wraps\n\nimport json\n\nfrom flask import Flask, Response, make_response, request\nfrom flask_restful import Api, Resource, reqparse\n\nfrom ..exceptions import TaskNotFoundError\nfrom ..base import DBprocess\nfrom ...config import config as c\n# from dxpy.api.urls import api_path\n\n\nclass TaskResource(Resource):\n def get(self, id):\n try:\n return Response(\n DBprocess.read(id), 200, mimetype=\"application/json\")\n except TaskNotFoundError as e:\n return str(e), 404\n\n def delete(self, id):\n try:\n return Response(\n DBprocess.delete(id), 200, mimetype=\"application/json\")\n except TaskNotFoundError as e:\n return str(e), 404\n\n \nclass TasksResource(Resource): \n def get(self):\n task_jsons = []\n DBprocess.read_all().subscribe(lambda t: task_jsons.append(t))\n return Response(\n json.dumps(task_jsons), 200, mimetype=\"application/json\")\n\n def post(self):\n task = request.form['task'] \n res = DBprocess.create(task)\n return Response(\n json.dumps({\n 'id': res\n }), 201, mimetype=\"application/json\")\n\n def put(self):\n try:\n task = request.form['task']\n return Response(\n DBprocess.update(task), 201, mimetype=\"application/json\")\n except TaskNotFoundError as e:\n return str(e), 404\n\ndef api_root(version):\n return \"/api/v{version}\".format(version=version)\n\ndef api_path(name, suffix=None, version=None, base=None):\n if base is None:\n base = api_root(version)\n else:\n if base.startswith('/'):\n base = base[1:]\n base = \"{root}/{base}\".format(root=api_root(version), base=base)\n \n if base.endswith('/'):\n base = base[:-1] \n if suffix is None:\n return \"{base}/{name}\".format(base=base, name=name)\n else:\n return \"{base}/{name}/{suffix}\".format(base=base, name=name, suffix=suffix)\n\ndef add_api(api): \n # print(api_path(c['name'], '', c['version'], c['base']))\n api.add_resource(TaskResource,\n api_path(c['name'], '', c['version'], c['base']))\n api.add_resource(TasksResource,\n api_path(c['names'], None, c['version'], c['base']))\n","sub_path":"src/python/dxl/cluster/database/api/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"160985541","text":"# Solves Navier-Stokes equation for flow around beam\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\nimport numpy as np\n\nprint(\"Working, look for the figure after 100 iterations\")\nNxmax = 70\nNymax = 20\nIL = 10\n
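# Geometry of the beam on the grid; interpretation assumed from the boundary\n# conditions below: IL = upstream edge x-index, T = beam length, H = beam height.\n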
H = 8\nT = 8\nh = 1.0\n\nu = np.zeros( (Nxmax+1, Nymax+1) ) # Stream\nw = np.zeros( (Nxmax+1, Nymax+1) ) # Vorticity\nV0 = 1.0\nomega = 0.1\nnu = 1.0\n\niter = 0\nR = V0*h/nu\n\ndef borders():\n for i in range(Nxmax+1):\n for j in range(Nymax+1):\n w[i,j] = 0.0\n u[i,j] = j*V0\n\n # Fluid surface\n for i in range(Nxmax+1):\n u[i,Nymax] = u[i,Nymax-1] + V0*h\n w[i,Nymax-1] = 0.0\n\n for j in range(Nymax+1):\n u[1,j] = u[0,j]\n w[0,j] = 0.0\n \n for i in range(Nxmax+1):\n if i <= IL or i >= IL+T: # centerline outside the beam ('and' here could never be true)\n u[i,0] = 0.0\n w[i,0] = 0.0\n \n for j in range(1,Nymax):\n w[Nxmax,j] = w[Nxmax-1,j]\n u[Nxmax,j] = u[Nxmax-1,j]\n\n\ndef beam():\n # BC for beam\n for j in range(H+1): # Sides\n w[IL,j] = -2*u[IL-1,j]/(h*h) # Front\n w[IL+T,j] = -2*u[IL+T+1,j]/(h*h) # Back\n #\n for i in range(IL, IL+T+1):\n w[i,H-1] = -2*u[i,H]/(h*h)\n #\n for i in range(IL,IL+T+1):\n for j in range(H+1):\n u[IL,j] = 0.0 # Front\n u[IL+T,j] = 0.0 # Back\n u[i,H] = 0 # top\n\ndef relax():\n beam() # Reset\n for i in range(1,Nxmax): # Relax stream\n for j in range(1,Nymax):\n r1 = omega * ( ( u[i+1,j] + u[i-1,j] + u[i,j+1] + u[i,j-1] + h*h*w[i,j] )/4 - u[i,j] )\n u[i,j] += r1\n #\n for i in range(1,Nxmax): # Relax vorticity\n for j in range(1,Nymax):\n a1 = w[i+1,j] + w[i-1,j] + w[i,j+1] + w[i,j-1]\n a2 = ( u[i,j+1] - u[i,j-1] ) * (w[i+1,j] - w[i-1,j] )\n a3 = ( u[i+1,j] - u[i-1,j] ) * (w[i,j+1] - w[i,j-1] )\n r2 = omega * ( ( a1 - (R/4.0) * ( a2 - a3 ) )/4.0 - w[i,j] )\n w[i,j] += r2\n\n\nborders() \niiter = 0\nwhile(iiter <= 100):\n iiter += 1\n if iiter %10 == 0:\n print(iiter)\n relax()\n\nfor i in range(0,Nxmax+1):\n for j in range(0,Nymax+1) :\n u[i,j] = u[i,j]/V0/h\nx = np.arange(Nxmax-1)\ny = np.arange(Nymax-1)\nX, Y = np.meshgrid(x,y)\n\n# Stream flow\ndef functz(u):\n z = u[X,Y]\n return z\n\nZ = u[X,Y]\nfig = plt.figure()\nax = Axes3D(fig)\nax.plot_wireframe(X, Y, Z, color=\"r\")\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Y\")\nax.set_zlabel(\"StreamFunction\")\nplt.savefig(\"IMG_beam_v1.pdf\")\n\n","sub_path":"landau_cp/beam_v1.py","file_name":"beam_v1.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"291802624","text":"from collections import defaultdict\nimport json\nfrom nose.tools import (\n assert_raises_regexp,\n assert_raises,\n eq_,\n set_trace,\n)\nfrom sqlalchemy.orm.exc import (\n MultipleResultsFound,\n NoResultFound,\n)\nfrom authentication_document import AuthenticationDocument\nfrom . import DatabaseTest\nfrom model import (\n Audience,\n Place,\n ServiceArea,\n)\nfrom util.problem_detail import ProblemDetail\nfrom problem_details import INVALID_INTEGRATION_DOCUMENT\nfrom testing import MockPlace\n\n# Alias for a long class name\nAuthDoc = AuthenticationDocument\n\nclass TestParseCoverage(DatabaseTest):\n\n EVERYWHERE = AuthenticationDocument.COVERAGE_EVERYWHERE\n\n def parse_places(self, coverage_object, expected_places=None,\n expected_unknown=None, expected_ambiguous=None):\n \"\"\"Call AuthenticationDocument.parse_coverage. 
Verify that the parsed\n list of places, as well as the dictionaries of unknown and\n ambiguous place names, are as expected.\n \"\"\"\n place_objs, unknown, ambiguous = AuthDoc.parse_coverage(\n self._db, coverage_object, MockPlace\n )\n empty = defaultdict(list)\n expected_places = expected_places or []\n expected_unknown = expected_unknown or empty\n expected_ambiguous = expected_ambiguous or empty\n # TODO PYTHON3 replace eq_sorted() with eq_()\n def eq_sorted(a, b):\n def key(x):\n return id(x)\n eq_(sorted(a, key=key), sorted(b, key=key))\n eq_sorted(expected_places, place_objs)\n eq_sorted(expected_unknown, unknown)\n eq_sorted(expected_ambiguous, ambiguous)\n\n def test_universal_coverage(self):\n # Test an authentication document that says a library covers the\n # whole universe.\n self.parse_places(\n self.EVERYWHERE, [MockPlace.EVERYWHERE]\n )\n\n def test_entire_country(self):\n # Test an authentication document that says a library covers an\n # entire country.\n us = MockPlace()\n MockPlace.by_name[\"US\"] = us\n self.parse_places(\n {\"US\": self.EVERYWHERE },\n expected_places=[us]\n )\n\n def test_ambiguous_country(self):\n # Test the unlikely scenario where an authentication document says a\n # library covers an entire country, but it's ambiguous which\n # country is being referred to.\n\n canada = MockPlace()\n MockPlace.by_name[\"CA\"] = canada\n MockPlace.by_name[\"Europe I think?\"] = MockPlace.AMBIGUOUS\n self.parse_places(\n {\"Europe I think?\": self.EVERYWHERE, \"CA\": self.EVERYWHERE },\n expected_places=[canada],\n expected_ambiguous={\"Europe I think?\": self.EVERYWHERE}\n )\n\n def test_unknown_country(self):\n # Test an authentication document that says a library covers an\n # entire country, but the library registry doesn't know anything about\n # that country's geography.\n\n canada = MockPlace()\n MockPlace.by_name[\"CA\"] = canada\n self.parse_places(\n {\"Memory Alpha\": self.EVERYWHERE, \"CA\": self.EVERYWHERE },\n expected_places=[canada],\n expected_unknown={\"Memory Alpha\": self.EVERYWHERE}\n )\n\n def test_places_within_country(self):\n # Test an authentication document that says a library\n # covers one or more places within a country.\n\n # This authentication document covers two places called\n # \"San Francisco\" (one in the US and one in Mexico) as well as a\n # place called \"Mexico City\" in Mexico.\n #\n # Note that it's invalid to map a country name to a single\n # place name (it's supposed to always be a list), but our\n # parser can handle it.\n doc = {\"US\": \"San Francisco\", \"MX\": [\"San Francisco\", \"Mexico City\"]}\n\n place1 = MockPlace()\n place2 = MockPlace()\n place3 = MockPlace()\n place4 = MockPlace()\n us = MockPlace(inside={\"San Francisco\": place1, \"San Jose\": place2})\n mx = MockPlace(inside={\"San Francisco\": place3, \"Mexico City\": place4})\n MockPlace.by_name[\"US\"] = us\n MockPlace.by_name[\"MX\"] = mx\n\n # AuthenticationDocument.parse_coverage is able to turn those\n # three place names into place objects.\n self.parse_places(\n doc,\n expected_places=[place1, place3, place4]\n )\n\n def test_ambiguous_place_within_country(self):\n # Test an authentication document that names an ambiguous\n # place within a country.\n us = MockPlace(inside={\"Springfield\": MockPlace.AMBIGUOUS})\n MockPlace.by_name[\"US\"] = us\n\n self.parse_places(\n {\"US\": [\"Springfield\"]},\n expected_ambiguous={\"US\": [\"Springfield\"]}\n )\n\n def test_unknown_place_within_country(self):\n # Test an authentication document that 
names an unknown\n # place within a country.\n sf = MockPlace()\n us = MockPlace(inside={\"San Francisco\": sf})\n MockPlace.by_name[\"US\"] = us\n\n self.parse_places(\n {\"US\": \"Nowheresville\"},\n expected_unknown={\"US\": [\"Nowheresville\"]}\n )\n\n def test_unscoped_place_is_in_default_nation(self):\n # Test an authentication document that names places without\n # saying which nation they're in.\n ca = MockPlace()\n ut = MockPlace()\n\n # Without a default nation on the server side, we can't make\n # sense of these place names.\n self.parse_places(\"CA\", expected_unknown={\"??\": \"CA\"})\n\n self.parse_places(\n [\"CA\", \"UT\"], expected_unknown={\"??\": [\"CA\", \"UT\"]}\n )\n\n us = MockPlace(inside={\"CA\": ca, \"UT\": ut})\n us.abbreviated_name = \"US\"\n MockPlace.by_name[\"US\"] = us\n\n # With a default nation in place, a bare string like \"CA\"\n # is treated the same as a correctly formatted dictionary\n # like {\"US\": [\"CA\"]}\n MockPlace._default_nation = us\n self.parse_places(\"CA\", expected_places=[ca])\n self.parse_places([\"CA\", \"UT\"], expected_places=[ca, ut])\n\n MockPlace._default_nation = None\n\n\nclass TestLinkExtractor(object):\n \"\"\"Test the _extract_link helper method.\"\"\"\n\n def test_no_matching_link(self):\n links = [dict(rel=\"alternate\", href=\"http://foo/\", type=\"text/html\")]\n\n # There is no link with the given relation.\n eq_(None, AuthDoc._extract_link(links, rel='self'))\n\n # There is a link with the given relation, but the type is wrong.\n eq_(\n None,\n AuthDoc._extract_link(\n links, 'alternate', require_type=\"text/plain\"\n )\n )\n\n\n def test_prefer_type(self):\n \"\"\"Test that prefer_type holds out for the link you're\n looking for.\n \"\"\"\n first_link = dict(\n rel=\"alternate\", href=\"http://foo/\", type=\"text/html\"\n )\n second_link = dict(\n rel=\"alternate\", href=\"http://bar/\",\n type=\"text/plain;charset=utf-8\"\n )\n links = [first_link, second_link]\n\n # We would prefer the second link.\n eq_(second_link,\n AuthDoc._extract_link(\n links, 'alternate', prefer_type=\"text/plain\"\n )\n )\n\n # We would prefer the first link.\n eq_(first_link,\n AuthDoc._extract_link(\n links, 'alternate', prefer_type=\"text/html\"\n )\n )\n\n # The type we prefer is not available, so we get the first link.\n eq_(first_link,\n AuthDoc._extract_link(\n links, 'alternate', prefer_type=\"application/xhtml+xml\"\n )\n )\n\n def test_empty_document(self):\n \"\"\"Provide an empty Authentication For OPDS document to test\n default values.\n \"\"\"\n place = MockPlace()\n everywhere = place.everywhere(None)\n parsed = AuthDoc.from_string(None, \"{}\", place)\n\n # In the absence of specific information, it's assumed the\n # OPDS server is open to everyone.\n eq_(([everywhere], {}, {}), parsed.service_area)\n eq_(([everywhere], {}, {}), parsed.focus_area)\n eq_([parsed.PUBLIC_AUDIENCE], parsed.audiences)\n\n eq_(None, parsed.id)\n eq_(None, parsed.title)\n eq_(None, parsed.service_description)\n eq_(None, parsed.color_scheme)\n eq_(None, parsed.collection_size)\n eq_(None, parsed.public_key)\n eq_(None, parsed.website)\n eq_(False, parsed.online_registration)\n eq_(None, parsed.root)\n eq_([], parsed.links)\n eq_(None, parsed.logo)\n eq_(None, parsed.logo_link)\n eq_(False, parsed.anonymous_access)\n\n def test_real_document(self):\n \"\"\"Test an Authentication For OPDS document that demonstrates\n most of the features we're looking for.\n \"\"\"\n document = {\n \"id\": \"http://library/authentication-for-opds-file\",\n 
\"title\": \"Ansonia Public Library\",\n \"links\": [\n {\"rel\": \"logo\", \"href\": \"data:image/png;base64,some-image-data\", \"type\": \"image/png\"},\n {\"rel\": \"alternate\", \"href\": \"http://ansonialibrary.org\", \"type\": \"text/html\"},\n {\"rel\": \"register\", \"href\": \"http://example.com/get-a-card/\", \"type\": \"text/html\"},\n {\"rel\": \"start\", \"href\": \"http://catalog.example.com/\", \"type\": \"text/html/\"},\n {\"rel\": \"start\", \"href\": \"http://opds.example.com/\", \"type\": \"application/atom+xml;profile=opds-catalog\"}\n ],\n \"service_description\": \"Serving Ansonia, CT\",\n \"color_scheme\": \"gold\",\n \"collection_size\": {\"eng\": 100, \"spa\": 20},\n \"public_key\": \"a public key\",\n \"features\": {\"disabled\": [], \"enabled\": [\"https://librarysimplified.org/rel/policy/reservations\"]},\n \"authentication\": [\n {\n \"type\": \"http://opds-spec.org/auth/basic\",\n \"description\": \"Log in with your library barcode\",\n \"inputs\": {\"login\": {\"keyboard\": \"Default\"},\n \"password\": {\"keyboard\": \"Default\"}},\n \"labels\": {\"login\": \"Barcode\", \"password\": \"PIN\"}\n }\n ]\n }\n\n place = MockPlace()\n everywhere = place.everywhere(None)\n parsed = AuthDoc.from_dict(None, document, place)\n\n # Information about the OPDS server has been extracted from\n # JSON and put into the AuthenticationDocument object.\n eq_(\"http://library/authentication-for-opds-file\", parsed.id)\n eq_(\"Ansonia Public Library\", parsed.title)\n eq_(\"Serving Ansonia, CT\", parsed.service_description)\n eq_(\"gold\", parsed.color_scheme)\n eq_({\"eng\": 100, \"spa\": 20}, parsed.collection_size)\n eq_(\"a public key\", parsed.public_key)\n eq_({u'rel': 'alternate', u'href': u'http://ansonialibrary.org',\n u'type': u'text/html'},\n parsed.website)\n eq_(True, parsed.online_registration)\n eq_({\"rel\": \"start\", \"href\": \"http://opds.example.com/\", \"type\": \"application/atom+xml;profile=opds-catalog\"}, parsed.root)\n eq_(\"data:image/png;base64,some-image-data\", parsed.logo)\n eq_(None, parsed.logo_link)\n eq_(False, parsed.anonymous_access)\n\n def online_registration_for_one_authentication_mechanism(self):\n \"\"\"An OPDS server offers online registration if _any_ of its\n authentication flows offer online registration.\n\n It also works if the server itself offers registration (see\n previous test).\n \"\"\"\n document = {\n \"authentication\": [\n {\n \"description\": \"You'll never guess the secret code.\",\n \"type\": \"http://opds-spec.org/auth/basic\"\n },\n {\n \"description\": \"But anyone can get a library card.\",\n \"type\": \"http://opds-spec.org/auth/basic\",\n \"links\": [\n { \"rel\": \"register\",\n \"href\": \"http://get-a-library-card/\"\n }\n ]\n }\n ]\n }\n eq_(True, parsed.online_registration)\n\n\n def test_name_treated_as_title(self):\n \"\"\"Some invalid documents put the library name in 'name' instead of title.\n We can handle these documents.\n \"\"\"\n document = dict(name=\"My library\")\n auth = AuthDoc.from_dict(None, document, MockPlace())\n eq_(\"My library\", auth.title)\n\n def test_logo_link(self):\n \"\"\"You can link to your logo, instead of including it in the\n document.\n \"\"\"\n document = {\n \"links\": [\n dict(rel=\"logo\", href=\"http://logo.com/logo.jpg\")\n ]\n }\n auth = AuthDoc.from_dict(None, document, MockPlace())\n eq_(None, auth.logo)\n eq_({\"href\": \"http://logo.com/logo.jpg\", \"rel\": \"logo\"}, auth.logo_link)\n\n def test_audiences(self):\n \"\"\"You can specify the target audiences for 
your OPDS server.\"\"\"\n document = {\"audience\": [\"educational-secondary\", \"research\"]}\n auth = AuthDoc.from_dict(None, document, MockPlace())\n eq_([\"educational-secondary\", \"research\"], auth.audiences)\n\n def test_anonymous_access(self):\n \"\"\"You can signal that your OPDS server allows anonymous access by\n including it as an authentication type.\n \"\"\"\n document = dict(authentication=[\n dict(type=\"http://opds-spec.org/auth/basic\"),\n dict(type=\"https://librarysimplified.org/rel/auth/anonymous\")\n ])\n auth = AuthDoc.from_dict(None, document, MockPlace())\n eq_(True, auth.anonymous_access)\n\n\nclass TestUpdateServiceAreas(DatabaseTest):\n\n def test_set_service_areas(self):\n # Test the method that replaces a Library's ServiceAreas.\n m = AuthenticationDocument.set_service_areas\n\n library = self._library()\n p1 = self._place()\n p2 = self._place()\n\n def eligibility_areas():\n return [x.place for x in library.service_areas\n if x.type==ServiceArea.ELIGIBILITY]\n\n def focus_areas():\n return [x.place for x in library.service_areas\n if x.type==ServiceArea.FOCUS]\n\n # Try a successful case.\n p1_only = [[p1], {}, {}]\n p2_only = [[p2], {}, {}]\n m(library, p1_only, p2_only)\n eq_([p1], eligibility_areas())\n eq_([p2], focus_areas())\n\n # If you pass in two empty inputs, no changes are made.\n empty = [[], {}, {}]\n m(library, empty, empty)\n eq_([p1], eligibility_areas())\n eq_([p2], focus_areas())\n\n # If you pass only one value, the focus area is set to that\n # value and the eligibility area is cleared out.\n m(library, p1_only, empty)\n eq_([], eligibility_areas())\n eq_([p1], focus_areas())\n\n m(library, empty, p2_only)\n eq_([], eligibility_areas())\n eq_([p2], focus_areas())\n\n\n def test_known_place_becomes_servicearea(self):\n \"\"\"Test the helper method in a successful case.\"\"\"\n library = self._library()\n\n # We identified two places, with no ambiguous or unknown\n # places.\n p1 = self._place()\n p2 = self._place()\n valid = [p1, p2]\n ambiguous = []\n unknown = []\n\n areas = []\n\n # This will use those places to create new ServiceAreas,\n # which will be gathered in the 'areas' array.\n problem = AuthenticationDocument._update_service_areas(\n library, [valid, unknown, ambiguous], ServiceArea.FOCUS,\n areas\n )\n eq_(None, problem)\n\n [a1, a2] = sorted(library.service_areas, key = lambda x: x.place_id)\n eq_(p1, a1.place)\n eq_(ServiceArea.FOCUS, a1.type)\n\n eq_(p2, a2.place)\n eq_(ServiceArea.FOCUS, a2.type)\n\n # The ServiceArea IDs were added to the `ids` list.\n eq_(set([a1, a2]), set(areas))\n\n\n def test_ambiguous_and_unknown_places_become_problemdetail(self):\n \"\"\"Test the helper method in a case that ends in failure.\"\"\"\n library = self._library()\n\n # We were able to identify one valid place.\n valid = [self._place()]\n\n # But we also found unknown and ambiguous places.\n ambiguous = [\"Ambiguous\"]\n unknown = [\"Unknown 1\", \"Unknown 2\"]\n\n ids = []\n problem = AuthenticationDocument._update_service_areas(\n library, [valid, unknown, ambiguous], ServiceArea.ELIGIBILITY,\n ids\n )\n\n # We got a ProblemDetail explaining the problem\n assert isinstance(problem, ProblemDetail)\n eq_(INVALID_INTEGRATION_DOCUMENT.uri, problem.uri)\n eq_(\n 'The following service area was unknown: [\"Unknown 1\", \"Unknown 2\"]. 
The following service area was ambiguous: [\"Ambiguous\"].',\n problem.detail\n )\n\n # No IDs were added to the list.\n eq_([], ids)\n\n def test_update_service_areas(self):\n\n # This Library has no ServiceAreas associated with it.\n library = self._library()\n\n country1 = self._place(abbreviated_name=\"C1\", type=Place.NATION)\n country2 = self._place(abbreviated_name=\"C2\", type=Place.NATION)\n\n everywhere = AuthenticationDocument.COVERAGE_EVERYWHERE\n doc_dict = dict(\n service_area=everywhere,\n focus_area = { country1.abbreviated_name : everywhere,\n country2.abbreviated_name : everywhere }\n )\n doc = AuthenticationDocument.from_dict(self._db, doc_dict)\n problem = doc.update_service_areas(library)\n self._db.commit()\n eq_(None, problem)\n\n # Now this Library has three associated ServiceAreas.\n [a1, a2, a3] = sorted(\n [(x.type, x.place.abbreviated_name)\n for x in library.service_areas]\n )\n everywhere_place = Place.everywhere(self._db)\n\n # Anyone is eligible for access.\n eq_(('eligibility', everywhere_place.abbreviated_name), a1)\n\n # But people in two particular countries are the focus.\n eq_(('focus', country1.abbreviated_name), a2)\n eq_(('focus', country2.abbreviated_name), a3)\n\n # Remove one of the countries from the focus, add a new one,\n # and try again.\n country3 = self._place(abbreviated_name=\"C3\", type=Place.NATION)\n doc_dict = dict(\n service_area=everywhere,\n focus_area = { country1.abbreviated_name : everywhere,\n country3.abbreviated_name : everywhere }\n )\n doc = AuthenticationDocument.from_dict(self._db, doc_dict)\n doc.update_service_areas(library)\n self._db.commit()\n\n # The ServiceArea for country #2 has been removed.\n assert a2 not in library.service_areas\n assert not any(a.place == country2 for a in library.service_areas)\n\n [a1, a2, a3] = sorted(\n [(x.type, x.place.abbreviated_name)\n for x in library.service_areas]\n )\n eq_(('eligibility', everywhere_place.abbreviated_name), a1)\n eq_(('focus', country1.abbreviated_name), a2)\n eq_(('focus', country3.abbreviated_name), a3)\n\n def test_service_area_registered_as_focus_area_if_no_focus_area(self):\n\n library = self._library()\n # Create an authentication document that defines service_area\n # but not focus_area.\n everywhere = AuthenticationDocument.COVERAGE_EVERYWHERE\n doc_dict = dict(service_area=everywhere)\n doc = AuthenticationDocument.from_dict(self._db, doc_dict)\n problem = doc.update_service_areas(library)\n self._db.commit()\n eq_(None, problem)\n\n # We have a focus area but no explicit eligibility area. 
This\n # means that the library's eligibility area and focus area are\n # the same.\n [area] = library.service_areas\n eq_(Place.EVERYWHERE, area.place.type)\n eq_(ServiceArea.FOCUS, area.type)\n\n\n def test_service_area_registered_as_focus_area_if_identical_to_focus_area(self):\n library = self._library()\n\n # Create an authentication document that defines service_area\n # and focus_area as the same value.\n everywhere = AuthenticationDocument.COVERAGE_EVERYWHERE\n doc_dict = dict(\n service_area=everywhere,\n focus_area=everywhere,\n )\n doc = AuthenticationDocument.from_dict(self._db, doc_dict)\n problem = doc.update_service_areas(library)\n self._db.commit()\n eq_(None, problem)\n\n # Since focus area and eligibility area are the same, only the\n # focus area was registered.\n [area] = library.service_areas\n eq_(Place.EVERYWHERE, area.place.type)\n eq_(ServiceArea.FOCUS, area.type)\n\n\nclass TestUpdateAudiences(DatabaseTest):\n\n def setup(self):\n super(TestUpdateAudiences, self).setup()\n self.library = self._library()\n\n def update(self, audiences):\n \"\"\"Wrapper around AuthenticationDocument._update_audiences.\"\"\"\n result = AuthenticationDocument._update_audiences(\n self.library, audiences\n )\n\n # If there's a problem detail document, it must be of the type\n # INVALID_INTEGRATION_DOCUMENT. The caller may perform additional\n # checks.\n if isinstance(result, ProblemDetail):\n eq_(result.uri, INVALID_INTEGRATION_DOCUMENT.uri)\n return result\n\n def test_update_audiences(self):\n\n # Set the library's audiences.\n audiences = [Audience.EDUCATIONAL_SECONDARY, Audience.RESEARCH]\n doc_dict = dict(audience=audiences)\n doc = AuthenticationDocument.from_dict(self._db, doc_dict)\n problem = doc.update_audiences(self.library)\n eq_(None, problem)\n eq_(set(audiences), set([x.name for x in self.library.audiences]))\n\n # Set them again to different but partially overlapping values.\n audiences = [\n Audience.EDUCATIONAL_PRIMARY, Audience.EDUCATIONAL_SECONDARY\n ]\n problem = self.update(audiences)\n eq_(set(audiences), set([x.name for x in self.library.audiences]))\n\n def test_update_audiences_to_invalid_value(self):\n # You're not supposed to specify a single string as `audience`,\n # but we can handle it.\n audience = Audience.EDUCATIONAL_PRIMARY\n problem = self.update(audience)\n eq_([audience], [x.name for x in self.library.audiences])\n\n # But you can't specify some other random object.\n value = dict(k=\"v\")\n problem = self.update(value)\n eq_(u\"'audience' must be a list: %r\" % value, problem.detail)\n\n def test_unrecognized_audiences_become_other(self):\n # If you specify an audience that we don't recognize, it becomes\n # Audience.OTHER.\n audiences = [\"Some random audience\", Audience.PUBLIC]\n self.update(audiences)\n eq_(set([Audience.OTHER, Audience.PUBLIC]),\n set([x.name for x in self.library.audiences]))\n\n def test_audience_defaults_to_public(self):\n # If a library doesn't specify its audience, we assume it's open\n # to the general public.\n self.update(None)\n eq_([Audience.PUBLIC], [x.name for x in self.library.audiences])\n\n\nclass TestUpdateCollectionSize(DatabaseTest):\n\n def setup(self):\n super(TestUpdateCollectionSize, self).setup()\n self.library = self._library()\n\n def update(self, value):\n result = AuthenticationDocument._update_collection_size(\n self.library, value\n )\n # If there's a problem detail document, it must be of the type\n # INVALID_INTEGRATION_DOCUMENT. 
The caller may perform additional\n # checks.\n if isinstance(result, ProblemDetail):\n eq_(result.uri, INVALID_INTEGRATION_DOCUMENT.uri)\n return result\n\n def test_success(self):\n sizes = dict(eng=100, jpn=0)\n doc_dict = dict(collection_size=sizes)\n doc = AuthenticationDocument.from_dict(self._db, doc_dict)\n problem = doc.update_collection_size(self.library)\n eq_(None, problem)\n\n # Two CollectionSummaries have been created, for the English\n # collection and the (empty) Japanese collection.\n eq_([(u'eng', 100), (u'jpn', 0)],\n sorted([(x.language, x.size) for x in self.library.collections]))\n\n # Update the library with new data.\n self.update({\"eng\": \"200\"})\n # The Japanese collection has been removed altogether, since\n # it was not mentioned in the input.\n [english] = self.library.collections\n eq_(\"eng\", english.language)\n eq_(200, english.size)\n\n self.update(None)\n # Now both collections have been removed.\n eq_([], self.library.collections)\n\n def test_single_collection(self):\n # Register a single collection not differentiated by language.\n self.update(100)\n\n [unknown] = self.library.collections\n eq_(None, unknown.language)\n eq_(100, unknown.size)\n\n # A string will also work.\n self.update(\"51\")\n\n [unknown] = self.library.collections\n eq_(None, unknown.language)\n eq_(51, unknown.size)\n\n def test_unknown_language_registered_as_unknown(self):\n self.update(dict(mmmmm=100))\n [unknown] = self.library.collections\n eq_(None, unknown.language)\n eq_(100, unknown.size)\n\n # Here's a tricky case with multiple unknown languages. They\n # all get grouped together into a single 'unknown language'\n # collection.\n self.update({None: 100, \"mmmmm\":200, \"zzzzz\":300})\n [unknown] = self.library.collections\n eq_(None, unknown.language)\n eq_(100+200+300, unknown.size)\n\n def test_invalid_collection_size(self):\n problem = self.update([1,2,3])\n eq_(\"'collection_size' must be a number or an object mapping language codes to numbers\", problem.detail)\n\n def test_negative_collection_size(self):\n problem = self.update(-100)\n eq_(\"Collection size cannot be negative.\", problem.detail)\n","sub_path":"tests/test_authentication_document.py","file_name":"test_authentication_document.py","file_ext":"py","file_size_in_byte":26433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"436650928","text":"import socket, string, time, math, base64, zlib\r\nSERVER = 'irc.root-me.org'\r\nPORT = 6667\r\nNICKNAME = 'Ck4aM-bot1'\r\nCHANNEL = '#root-me_challenge'\r\n\r\n\r\ndef main():\r\n global IRC\r\n IRC = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n IRC.connect((SERVER, PORT))\r\n Listener()\r\n IRC.close()\r\n\r\n\r\ndef send_data(command):\r\n IRC.send(bytes(command + '\\n', 'UTF-8'))\r\n\r\n\r\ndef joinchan(chan):\r\n send_data('JOIN ' + chan)\r\n ircmsg = \"\"\r\n while ircmsg.find(\"is now your displayed host\") == -1:\r\n ircmsg = IRC.recv(2048).decode(\"UTF-8\")\r\n # DEBUG # print(ircmsg)\r\n\r\n\r\ndef rot13(phrase):\r\n key = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n val = \"nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM\"\r\n transform = dict(zip(key, val))\r\n return ''.join(transform.get(char, char) for char in phrase)\r\n\r\n\r\ndef Listener():\r\n send_data(\"USER \" + NICKNAME + \" \" + NICKNAME + \" \" + NICKNAME + \" \" + NICKNAME)\r\n send_data('NICK ' + NICKNAME)\r\n joinchan(CHANNEL)\r\n send_data('PRIVMSG candy !ep4')\r\n while (1):\r\n ircmsg = 
IRC.recv(2048).decode(\"UTF-8\")\r\n # DEBUG # print(ircmsg)\r\n data = ircmsg.split(':')[2]\r\n decoded = zlib.decompress(base64.b64decode(data)).decode(\"UTF-8\")\r\n IRC.send(bytes(\"PRIVMSG candy !ep4 -rep %s\" % decoded + '\\n', 'UTF-8'))\r\n ircmsg = IRC.recv(2048).decode(\"UTF-8\")\r\n print('Password: %s' % ircmsg.split('password ')[1])\r\n\r\n\r\nmain()\r\n","sub_path":"rootme-prog-4.py","file_name":"rootme-prog-4.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"272753623","text":"__author__ = 'jeff'\n\nfrom utils import *\nfrom cfg import *\nimport unittest\n\n\nclass TestDomain(unittest.TestCase):\n\n dtu = None\n\n def setUp(self):\n self.dtu = DesignateUtil(ADMIN_USER)\n\n def tearDown(self):\n self.dtu.clean_up()\n\n \"\"\"\n API calls to cover for v1\n\n Domains\n Create Domain\n Get a Domain\n Update a Domain\n Delete a Domain\n Get Servers Hosting a Domain\n List Domains\n \"\"\"\n\n \"\"\" Create Tests \"\"\"\n def test_create_domain(self):\n \"\"\"Create a valid domain and check it exists\"\"\"\n try:\n dapi = self.dtu.get_domain_api(ADMIN_USER)\n domain = self.dtu.create_domain(dapi, '')\n dapi.get({'id': domain['id']})\n except NotFound:\n self.fail('Failed to create Domain')\n except Forbidden:\n self.fail('Failed to create Domain')\n\n def test_create_domain_dup(self):\n \"\"\"Create a duplicate domain - expect failure\"\"\"\n try:\n dapi = self.dtu.get_domain_api(ADMIN_USER)\n domain = self.dtu.create_domain(dapi, '')\n domain2 = self.dtu.create_domain(dapi, domain['name'])\n dapi.get({'id': domain2['id']})\n self.fail('Failed - Domain with duplicated name created %s ' % domain['name'])\n except Conflict:\n \"\"\" Success by failure \"\"\"\n\n def test_create_domain_badcred(self):\n \"\"\"Create a domain using tenant credentials - expect failure\"\"\"\n try:\n dapi = self.dtu.get_domain_api(USER2)\n self.dtu.create_domain(dapi, '')\n except Forbidden:\n \"\"\" Success by failure \"\"\"\n except Unauthorized:\n \"\"\" Success by failure \"\"\"\n\n \"\"\" Update Tests \"\"\"\n def test_update_domain_fail_rename(self):\n \"\"\"Update a domain using admin credentials\"\"\"\n try:\n dapi = self.dtu.get_domain_api(USER1)\n domain = self.dtu.create_domain(dapi, '')\n domain_name = self.dtu.get_unique_name(8, \".test.com.\")\n options = {'id': domain['id'], 'data_json': {'name': domain_name}}\n dapi.update(options)\n self.fail('Failed to catch rename of Domain')\n except BadRequest:\n \"\"\" Success by failure \"\"\"\n\n def test_update_domain(self):\n \"\"\"Update a domain using admin credentials\"\"\"\n try:\n dapi = self.dtu.get_domain_api(USER1)\n domain = self.dtu.create_domain(dapi, '')\n email = \"changed@testemail.com\"\n options = {'id': domain['id'], 'data_json': {'email': email}}\n dapi.update(options)\n domain2 = dapi.get({'id': domain['id']})\n self.assertEqual(domain2['email'], email, \"Failed to update domain (email)\")\n except KeyError:\n self.fail('Failed to create Domain')\n except BadRequest:\n self.fail('Failed to update Domain')\n\n def test_update_domain_badcred(self):\n \"\"\"Update a domain using tenant credentials - expect failure\"\"\"\n try:\n dapi = self.dtu.get_domain_api(USER1)\n domain = self.dtu.create_domain(dapi, '')\n dapi2 = self.dtu.get_domain_api(USER2)\n email = \"changed@testemail.com\"\n options = {'id': domain['id'], 'data_json': {'email': email}}\n dapi2.update(options)\n self.fail(\"System allowed domain update by non-admin\")\n
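 # Reaching this fail() means the cross-tenant update went through;\n # any of the exception handlers below counts as the expected rejection.\n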
except Forbidden:\n            \"\"\" Success by Failure \"\"\"\n        except NotFound:\n            \"\"\" Success by Failure \"\"\"\n        except Unauthorized:\n            \"\"\" Success by failure \"\"\"\n\n    \"\"\" List Tests \"\"\"\n    def test_list_domains(self):\n        \"\"\"Get list of current domains\"\"\"\n        dapi = self.dtu.get_domain_api(USER1)\n        domain_ids = []\n        for _ in range(3):\n            domain = self.dtu.create_domain(dapi, '')\n            domain_ids.append(domain['id'])\n\n        for d_id in domain_ids:\n            try:\n                dapi.get({'id': d_id})\n            except NotFound:\n                self.fail('Failed to find domain in list')\n\n    def test_domain_server_list(self):\n        \"\"\"Create a valid domain and check that servers host it\"\"\"\n        try:\n            dapi = self.dtu.get_domain_api(USER1)\n            domain = self.dtu.create_domain(dapi, '')\n            dapi.get({'id': domain['id']})\n            servers = dapi.list_servers(domain['id'])\n            if len(servers) == 0:\n                self.fail('No servers returned for test_domain_server_list')\n        except NotFound:\n            self.fail('Failed to list Domain servers')\n        except Forbidden:\n            self.fail('Failed to list Domain servers')\n\n    \"\"\" Delete Tests \"\"\"\n    def test_delete_domain(self):\n        \"\"\"Delete a domain using admin credentials\"\"\"\n        try:\n            dapi = self.dtu.get_domain_api(USER1)\n            domain = self.dtu.create_domain(dapi, '')\n            dapi.delete({'id': domain['id']})\n            dapi.get({'id': domain['id']})\n            self.fail('Failed to delete Domain named %s ' % domain['name'])\n        except NotFound:\n            \"\"\" Success by Failure \"\"\"\n\n    def test_delete_domain_none(self):\n        \"\"\"Delete a domain that does not exist - expect failure\"\"\"\n        try:\n            dapi = self.dtu.get_domain_api(USER1)\n            dapi.delete({'id': self.dtu.get_unique_name(8)})\n            self.fail(\"Delete of non-existent domain not caught\")\n        except NotFound:\n            \"\"\" Success by Failure \"\"\"\n\n    def test_delete_domain_badcred(self):\n        \"\"\"Delete a domain using tenant credentials - expect failure\"\"\"\n        try:\n            dapi = self.dtu.get_domain_api(USER1)\n            domain = self.dtu.create_domain(dapi, '')\n            dapi2 = self.dtu.get_domain_api(USER2)\n            dapi2.delete({'id': domain['id']})\n            self.fail('Allowed Tenant to delete Domain')\n        except Forbidden:\n            \"\"\" Success by Failure \"\"\"\n        except NotFound:\n            \"\"\" Success by Failure \"\"\"\n        except Unauthorized:\n            \"\"\" Success by failure \"\"\"\n\n    @classmethod\n    def get_test_suite(cls):\n        \"\"\"\n        Gather all the tests from this module in a test suite.\n        \"\"\"\n        test_suite = unittest.TestSuite()\n        test_suite.addTest(unittest.makeSuite(cls))\n        return test_suite\n\n\ndef run_server_suite():\n    test_suite = TestDomain.get_test_suite()\n    runner = unittest.TextTestRunner()\n    runner.run(test_suite)","sub_path":"test_domain.py","file_name":"test_domain.py","file_ext":"py","file_size_in_byte":6486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"593387244","text":"# encoding: utf-8\nfrom django import forms\n\n\nclass StandardMatchForm(forms.Form):\n    number_of_players = forms.IntegerField()\n\n    def clean_number_of_players(self):\n        if self.cleaned_data[\"number_of_players\"] <= 1:\n            raise forms.ValidationError(\"La partida debe tener\\\n                                         al menos 2 jugadores\")\n        return self.cleaned_data[\"number_of_players\"]\n\n\nclass AlternativeMatchForm(forms.Form):\n    players = forms.IntegerField()\n    patrols = forms.IntegerField()\n    frigates = forms.IntegerField()\n    carriers = forms.IntegerField()\n    board_size = forms.IntegerField()\n    submarines = forms.IntegerField()\n    battleships = forms.IntegerField()\n\n    def clean_players(self):\n        if self.cleaned_data[\"players\"] <= 1:\n            raise forms.ValidationError(\"La partida 
debe tener\\\n                                         al menos 2 jugadores\")\n        return self.cleaned_data[\"players\"]\n\n\nclass ShipForm(forms.Form):\n    SHIP_TYPES = (\n        (None, 'tipo de barco'),\n        (0, 'portaaviones'),\n        (1, 'acorazado'),\n        (2, 'fragata'),\n        (3, 'submarino'),\n        (4, 'bote de patrulla'),\n    )\n\n    ORIENTATION_H = True\n    ORIENTATION_V = False\n    ORIENTATION_CHOICES = (\n        (None, 'orientación'),\n        (ORIENTATION_H, 'horizontal'),\n        (ORIENTATION_V, 'vertical')\n    )\n\n    match = forms.IntegerField()\n    coord_x = forms.IntegerField()\n    coord_y = forms.IntegerField()\n    ship_type = forms.ChoiceField(choices=SHIP_TYPES)\n    orientation = forms.ChoiceField(choices=ORIENTATION_CHOICES)\n\n    def clean_orientation(self):\n        orientation = self.cleaned_data[\"orientation\"]\n        if orientation in [\"True\", \"False\"]:\n            return orientation == \"True\"\n        else:\n            raise forms.ValidationError(\n                \"Debe seleccionar una orientación válida\")\n\n    def clean_ship_type(self):\n        if self.cleaned_data[\"ship_type\"] != 'None':\n            ship_type = int(self.cleaned_data[\"ship_type\"])\n            if ship_type not in range(5):\n                raise forms.ValidationError(\n                    \"Debe seleccionar un tipo de barco válido\")\n            return ship_type\n        else:\n            raise forms.ValidationError(\"Debe seleccionar un tipo de barco\")\n","sub_path":"sucks/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"355331830","text":"# %load q07_extras/build.py\n# Default Imports\nfrom greyatomlib.python_getting_started.q01_read_data.build import read_data\ndata = read_data()\n\n# Your Solution\ndef extras_runs(data=data):\n    \"\"\"Return the difference between the two innings in the number of deliveries that conceded extras.\"\"\"\n    extra1 = []\n    extra2 = []\n\n    inf1 = data['innings'][0]\n    for k, v in inf1.items():\n        for delivery in v['deliveries']:\n            for a, b in delivery.items():\n                if 'extras' in b.keys():\n                    extra1.append(b['runs']['extras'])\n    e1 = len(extra1)\n\n    inf2 = data['innings'][1]\n    for k, v in inf2.items():\n        for delivery in v['deliveries']:\n            for a, b in delivery.items():\n                if 'extras' in b.keys():\n                    extra2.append(b['runs']['extras'])\n    e2 = len(extra2)\n\n    difference = e2 - e1\n    return difference\n","sub_path":"q07_extras/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"23477593","text":"\"\"\"file_tools: All tools to load and save data\n\n##################################\n\n   2018 01 31 Included Nion Swift files to be opened\n   major revision 2020 09 to include sidpy and pyNSID data formats\n   2022 change to ase format for structures: this changed the default unit of length to Angstrom!!!\n\n##################################\n\"\"\"\n\nimport numpy as np\nimport h5py\nimport os\nimport pickle\n\n# For structure files of various flavors, for instance POSCAR and other theory packages\nimport ase.io\n\n# =============================================\n#   Include pycroscopy libraries              #\n# =============================================\nimport SciFiReaders\nimport pyNSID\nimport sidpy\nimport ipywidgets as widgets\nfrom IPython.display import display\n\n# =============================================\n#   Include pyTEMlib libraries                #\n# =============================================\nimport pyTEMlib.crystal_tools\nfrom pyTEMlib.config_dir import config_path\nfrom pyTEMlib.sidpy_tools import *\n\nQt_available = True\ntry:\n    from PyQt5 import QtCore, QtWidgets, QtGui\nexcept 
ModuleNotFoundError:\n print('Qt dialogs are not available')\n Qt_available = False\n\nDimension = sidpy.Dimension\n\nget_slope = sidpy.base.num_utils.get_slope\n__version__ = '2022.3.3'\n\n\nclass FileWidget(object):\n \"\"\"Widget to select directories or widgets from a list\n\n Works in google colab.\n The widget converts the name of the nion file to the one in Nion's swift software,\n because it is otherwise incomprehensible\n\n Attributes\n ----------\n dir_name: str\n name of starting directory\n extension: list of str\n extensions of files to be listed in widget\n\n Methods\n -------\n get_directory\n set_options\n get_file_name\n\n Example\n -------\n >>from google.colab import drive\n >>drive.mount(\"/content/drive\")\n >>file_list = pyTEMlib.file_tools.FileWidget()\n next code cell:\n >>dataset = pyTEMlib.file_tools.open_file(file_list.file_name)\n\n \"\"\"\n\n def __init__(self, dir_name=None, extension=['*']):\n self.save_path = False\n self.dir_dictionary = {}\n self.dir_list = ['.', '..']\n self.display_list = ['.', '..']\n\n self.dir_name = '.'\n if dir_name is None:\n self.dir_name = get_last_path()\n self.save_path = True\n elif os.path.isdir(dir_name):\n self.dir_name = dir_name\n\n self.get_directory(self.dir_name)\n self.dir_list = ['.']\n self.extensions = extension\n self.file_name = ''\n self.datasets ={}\n self.dataset = None\n\n self.select_files = widgets.Select(\n options=self.dir_list,\n value=self.dir_list[0],\n description='Select file:',\n disabled=False,\n rows=10,\n layout=widgets.Layout(width='70%')\n )\n \n select_button = widgets.Button(description='Select Main',\n layout=widgets.Layout(width='auto', grid_area='header'),\n style=widgets.ButtonStyle(button_color='lightblue'))\n \n add_button = widgets.Button(description='Add',\n layout=widgets.Layout(width='auto', grid_area='header'),\n style=widgets.ButtonStyle(button_color='lightblue'))\n \n self.path_choice = widgets.Dropdown(options=['None'],\n value='None',\n description='directory:',\n disabled=False,\n button_style='',\n layout=widgets.Layout(width='90%'))\n self.dataset_list = ['None']\n self.loaded_datasets = widgets.Dropdown(options=self.dataset_list,\n value=self.dataset_list[0],\n description='loaded datasets:',\n disabled=False,\n button_style='')\n \n self.set_options()\n ui = widgets.VBox([self.path_choice, self.select_files, widgets.HBox([select_button, add_button, self.loaded_datasets])])\n display(ui)\n \n self.select_files.observe(self.get_file_name, names='value')\n self.path_choice.observe(self.set_dir, names='value')\n\n select_button.on_click(self.select_main)\n add_button.on_click(self.add_dataset)\n self.loaded_datasets.observe(self.selected_dataset)\n\n def select_main(self, value=0):\n self.datasets = {}\n self.loaded_datasets.value = self.dataset_list[0]\n self.datasets = open_file(self.file_name)\n self.dataset_list = []\n for key in self.datasets.keys():\n self.dataset_list.append(f'{key}: {self.datasets[key].title}')\n self.loaded_datasets.options = self.dataset_list\n self.loaded_datasets.value = self.dataset_list[0]\n self.dataset = self.datasets[list(self.datasets.keys())[0]]\n self.selected_dataset = self.dataset\n \n def add_dataset(self, value=0):\n key = add_dataset_from_file(self.datasets, self.file_name, 'Channel')\n self.dataset_list.append(f'{key}: {self.datasets[key].title}')\n self.loaded_datasets.options = self.dataset_list\n self.loaded_datasets.value = self.dataset_list[-1]\n\n def get_directory(self, directory=None):\n self.dir_name = directory\n 
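# rebuild the cached directory listing for the newly selected folder\n        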
self.dir_dictionary = {}\n self.dir_list = []\n self.dir_list = ['.', '..'] + os.listdir(directory)\n\n def set_dir(self, value=0):\n self.dir_name = self.path_choice.value\n self.select_files.index = 0\n self.set_options()\n\n def selected_dataset(self, value=0):\n \n key = self.loaded_datasets.value.split(':')[0]\n if key != 'None':\n self.selected_dataset = self.datasets[key]\n\n def set_options(self):\n self.dir_name = os.path.abspath(os.path.join(self.dir_name, self.dir_list[self.select_files.index]))\n dir_list = os.listdir(self.dir_name)\n file_dict = update_directory_list(self.dir_name)\n\n sort = np.argsort(file_dict['directory_list'])\n self.dir_list = ['.', '..']\n self.display_list = ['.', '..']\n for j in sort:\n self.display_list.append(f\" * {file_dict['directory_list'][j]}\")\n self.dir_list.append(file_dict['directory_list'][j])\n\n sort = np.argsort(file_dict['display_file_list'])\n\n for i, j in enumerate(sort):\n if '--' in dir_list[j]:\n self.display_list.append(f\" {i:3} {file_dict['display_file_list'][j]}\")\n else:\n self.display_list.append(f\" {i:3} {file_dict['display_file_list'][j]}\")\n self.dir_list.append(file_dict['file_list'][j])\n\n self.dir_label = os.path.split(self.dir_name)[-1] + ':'\n self.select_files.options = self.display_list\n \n path = self.dir_name\n old_path = ' '\n path_list = []\n while path != old_path:\n path_list.append(path)\n old_path = path\n path = os.path.split(path)[0]\n self.path_choice.options = path_list\n self.path_choice.value = path_list[0]\n\n def get_file_name(self, b):\n\n if os.path.isdir(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):\n self.set_options()\n\n elif os.path.isfile(os.path.join(self.dir_name, self.dir_list[self.select_files.index])):\n self.file_name = os.path.join(self.dir_name, self.dir_list[self.select_files.index])\n\n\nclass ChooseDataset(object):\n \"\"\"Widget to select dataset object \"\"\"\n\n def __init__(self, input_object, show_dialog=True):\n self.datasets = None\n if isinstance(input_object, sidpy.Dataset):\n if isinstance(input_object.h5_dataset, h5py.Dataset):\n self.current_channel = input_object.h5_dataset.parent\n elif isinstance(input_object, h5py.Group):\n self.current_channel = input_object\n elif isinstance(input_object, h5py.Dataset):\n self.current_channel = input_object.parent\n elif isinstance(input_object, dict):\n self.datasets = input_object\n else:\n raise ValueError('Need hdf5 group or sidpy Dataset to determine image choices')\n self.dataset_names = []\n self.dataset_list = []\n self.dataset_type = None\n self.dataset = None\n if not isinstance(self.datasets, dict):\n self.reader = SciFiReaders.NSIDReader(self.current_channel.file.filename)\n else:\n self.reader = None\n self.get_dataset_list()\n self.select_image = widgets.Dropdown(options=self.dataset_list,\n value=self.dataset_list[0],\n description='select dataset:',\n disabled=False,\n button_style='')\n if show_dialog:\n display(self.select_image)\n\n self.select_image.observe(self.set_dataset, names='value')\n self.set_dataset(0)\n self.select_image.index = (len(self.dataset_names) - 1)\n\n def get_dataset_list(self):\n \"\"\" Get by Log number sorted list of datasets\"\"\"\n if not isinstance(self.datasets, dict):\n dataset_list = self.reader.read()\n self.datasets = {}\n for dataset in dataset_list:\n self.datasets[dataset.title] = dataset\n order = []\n keys = []\n for title, dset in self.datasets.items():\n if isinstance(dset, sidpy.Dataset):\n if self.dataset_type is None or dset.data_type == 
self.dataset_type:\n                    if 'Log' in title:\n                        order.append(2)\n                    else:\n                        order.append(0)\n                    keys.append(title)\n        for index in np.argsort(order):\n            self.dataset_names.append(keys[index])\n            self.dataset_list.append(keys[index] + ': ' + self.datasets[keys[index]].title)\n\n    def set_dataset(self, b):\n        index = self.select_image.index\n        self.key = self.dataset_names[index]\n        self.dataset = self.datasets[self.key]\n        self.dataset.title = self.dataset.title.split('/')[-1]\n\n\ndef add_to_dict(file_dict, name):\n    full_name = os.path.join(file_dict['directory'], name)\n    basename, extension = os.path.splitext(name)\n    size = os.path.getsize(full_name) * 2 ** -20\n    display_name = name\n    # os.path.splitext keeps the leading dot, so compare full extensions here\n    if extension in ['.h5', '.ndata']:\n        try:\n            reader = SciFiReaders.NionReader(full_name)\n            dataset_nion = reader.read()\n            display_name = dataset_nion.title\n            display_file_list = f\" {display_name}{extension} - {size:.1f} MB\"\n        except Exception:\n            display_file_list = f\" {name} - {size:.1f} MB\"\n    else:\n        display_file_list = f' {name} - {size:.1f} MB'\n    file_dict[name] = {'display_string': display_file_list, 'basename': basename, 'extension': extension,\n                       'size': size, 'display_name': display_name}\n\n\ndef update_directory_list(directory_name):\n    dir_list = os.listdir(directory_name)\n\n    if '.pyTEMlib.files.pkl' in dir_list:\n        with open(os.path.join(directory_name, '.pyTEMlib.files.pkl'), 'rb') as f:\n            file_dict = pickle.load(f)\n        if directory_name != file_dict['directory']:\n            print('directory moved since last time read')\n            file_dict['directory'] = directory_name\n        dir_list.remove('.pyTEMlib.files.pkl')\n    else:\n        file_dict = {'directory': directory_name}\n\n    # add new files\n    file_dict['file_list'] = []\n    file_dict['display_file_list'] = []\n    file_dict['directory_list'] = []\n\n    for name in dir_list:\n        if os.path.isfile(os.path.join(file_dict['directory'], name)):\n            if name not in file_dict:\n                add_to_dict(file_dict, name)\n            file_dict['file_list'].append(name)\n            file_dict['display_file_list'].append(file_dict[name]['display_string'])\n        else:\n            file_dict['directory_list'].append(name)\n    remove_item = []\n\n    # delete items of deleted files\n    save_pickle = False\n\n    for name in file_dict.keys():\n        if name not in dir_list and name not in ['directory', 'file_list', 'directory_list', 'display_file_list']:\n            remove_item.append(name)\n        else:\n            if 'extension' in file_dict[name]:\n                save_pickle = True\n    for item in remove_item:\n        file_dict.pop(item)\n\n    if save_pickle:\n        with open(os.path.join(file_dict['directory'], '.pyTEMlib.files.pkl'), 'wb') as f:\n            pickle.dump(file_dict, f)\n    return file_dict\n\n\n####\n# General Open and Save Methods\n####\n\ndef get_last_path():\n    \"\"\"Returns the path of the file last opened\"\"\"\n    try:\n        fp = open(os.path.join(config_path, 'path.txt'), 'r')\n        path = fp.read()\n        fp.close()\n    except IOError:\n        path = ''\n\n    if len(path) < 2:\n        path = '.'\n    return path\n\n\ndef save_path(filename):\n    \"\"\"Save path of last opened file\"\"\"\n\n    if len(filename) > 1:\n        fp = open(os.path.join(config_path, 'path.txt'), 'w')\n        path, fname = os.path.split(filename)\n        fp.write(path)\n        fp.close()\n    else:\n        path = '.'\n    return path\n\n\nif Qt_available:\n    def get_qt_app():\n        \"\"\"\n        will start QT Application if not running yet\n\n        :returns: QApplication\n\n        \"\"\"\n\n        # start qt event loop\n        _instance = 
QtWidgets.QApplication.instance()\n if not _instance:\n # print('not_instance')\n _instance = QtWidgets.QApplication([])\n\n return _instance\n\n\ndef open_file_dialog_qt(file_types=None): # , multiple_files=False):\n \"\"\"Opens a File dialog which is used in open_file() function\n\n This function uses pyQt5.\n The app of the Gui has to be running for QT. Tkinter does not run on Macs at this point in time.\n In jupyter notebooks use %gui Qt early in the notebook.\n\n The file looks first for a path.txt file for the last directory you used.\n\n Parameters\n ----------\n file_types : string\n file type filter in the form of '*.hf5'\n\n\n Returns\n -------\n filename : string\n full filename with absolute path and extension as a string\n\n Example\n -------\n >> import file_tools as ft\n >> filename = ft.openfile_dialog()\n >> print(filename)\n\n \"\"\"\n \"\"\"will start QT Application if not running yet and returns QApplication \"\"\"\n\n # determine file types by extension\n if file_types is None:\n file_types = 'TEM files (*.dm3 *.dm4 *.emd *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \\\n 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'\n elif file_types == 'pyNSID':\n file_types = 'pyNSID files (*.hf5);;TEM files (*.dm3 *.dm4 *.qf3 *.ndata *.h5 *.hf5);;QF files ( *.qf3);;' \\\n 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'\n\n # file_types = [(\"TEM files\",[\"*.dm*\",\"*.hf*\",\"*.ndata\" ]),(\"pyNSID files\",\"*.hf5\"),(\"DM files\",\"*.dm*\"),\n # (\"Nion files\",[\"*.h5\",\"*.ndata\"]),(\"all files\",\"*.*\")]\n\n # Determine last path used\n path = get_last_path()\n\n if Qt_available:\n _ = get_qt_app()\n filename = sidpy.io.interface_utils.openfile_dialog_QT(file_types=file_types, file_path=path)\n save_path(filename)\n return filename\n\ndef save_file_dialog_qt(file_types=None): # , multiple_files=False):\n \"\"\"Opens a File dialog which is used in open_file() function\n\n This function uses pyQt5.\n The app of the Gui has to be running for QT. 
Tkinter does not run on Macs at this point in time.\n In jupyter notebooks use %gui Qt early in the notebook.\n\n The file looks first for a path.txt file for the last directory you used.\n\n Parameters\n ----------\n file_types : string\n file type filter in the form of '*.hf5'\n\n\n Returns\n -------\n filename : string\n full filename with absolute path and extension as a string\n\n Example\n -------\n >> import file_tools as ft\n >> filename = ft.openfile_dialog()\n >> print(filename)\n\n \"\"\"\n \"\"\"will start QT Application if not running yet and returns QApplication \"\"\"\n\n # determine file types by extension\n if file_types is None:\n file_types = 'pyNSID files (*.hf5);;TEM files (*.dm3 *.dm4 *.qf3 *.ndata *.h5 *.hf5);;QF files ( *.qf3);;' \\\n 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'\n elif file_types == 'TEM':\n file_types = 'TEM files (*.dm3 *.dm4 *.emd *.ndata *.h5 *.hf5);;pyNSID files (*.hf5);;QF files ( *.qf3);;' \\\n 'DM files (*.dm3 *.dm4);;Nion files (*.ndata *.h5);;All files (*)'\n\n \n # file_types = [(\"TEM files\",[\"*.dm*\",\"*.hf*\",\"*.ndata\" ]),(\"pyNSID files\",\"*.hf5\"),(\"DM files\",\"*.dm*\"),\n # (\"Nion files\",[\"*.h5\",\"*.ndata\"]),(\"all files\",\"*.*\")]\n\n # Determine last path used\n path = get_last_path()\n\n if Qt_available:\n _ = get_qt_app()\n filename = sidpy.io.interface_utils.savefile_dialog(file_types=file_types, file_path=path)\n save_path(filename)\n return filename\n\n\ndef save_dataset(dataset, filename=None, h5_group=None):\n \"\"\" Saves a dataset to a file in pyNSID format\n Parameters\n ----------\n dataset: sidpy.Dataset\n the data\n filename: str\n name of file to be opened, if filename is None, a QT file dialog will try to open\n h5_group: hd5py.Group\n not used yet\n \"\"\"\n if filename is None:\n filename = save_file_dialog_qt()\n h5_filename = get_h5_filename(filename)\n h5_file = h5py.File(h5_filename, mode='a')\n path, file_name = os.path.split(filename)\n basename, _ = os.path.splitext(file_name)\n\n if isinstance(dataset, dict):\n h5_group = save_dataset_dictionary(h5_file, dataset)\n return h5_group\n\n elif isinstance(dataset, sidpy.Dataset):\n h5_dataset = save_single_dataset(h5_file, dataset, h5_group=h5_group)\n return h5_dataset.parent\n else:\n raise TypeError('Only sidpy.datasets or dictionaries can be saved with pyTEMlib')\n\n\ndef save_single_dataset(h5_file, dataset, h5_group=None):\n if h5_group is None:\n h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')\n h5_group = sidpy.hdf.prov_utils.create_indexed_group(h5_measurement_group, 'Channel_')\n\n elif isinstance(h5_group, str):\n if h5_group not in h5_file:\n h5_group = h5_file.create_group(h5_group)\n else:\n if h5_group[-1] == '/':\n h5_group = h5_group[:-1]\n\n channel = h5_group.split('/')[-1]\n h5_measurement_group = h5_group[:-len(channel)]\n h5_group = sidpy.hdf.prov_utils.create_indexed_group(h5_group, 'Channel_')\n else:\n raise ValueError('h5_group needs to be string or None')\n\n h5_dataset = pyNSID.hdf_io.write_nsid_dataset(dataset, h5_group)\n dataset.h5_dataset = h5_dataset\n h5_dataset.file.flush()\n return h5_dataset\n\n\ndef save_dataset_dictionary(h5_file, datasets):\n h5_measurement_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Measurement_')\n for key, dataset in datasets.items():\n if key[-1] == '/':\n key = key[:-1]\n if isinstance(dataset, sidpy.Dataset):\n h5_group = h5_measurement_group.create_group(key)\n h5_dataset = 
pyNSID.hdf_io.write_nsid_dataset(dataset, h5_group)\n            dataset.h5_dataset = h5_dataset\n            h5_dataset.file.flush()\n        elif isinstance(dataset, dict):\n            sidpy.hdf.hdf_utils.write_dict_to_h5_group(h5_measurement_group, dataset, key)\n        else:\n            print('could not save item ', key, 'of dataset dictionary')\n    return h5_measurement_group\n\n\ndef h5_group_to_dict(group, group_dict=None):\n    \"\"\"Recursively copy the attributes of a h5py group into a nested dictionary\"\"\"\n    if group_dict is None:\n        group_dict = {}\n    if not isinstance(group, h5py.Group):\n        raise TypeError('we need a h5py group to read from')\n    if not isinstance(group_dict, dict):\n        raise TypeError('group_dict needs to be a python dictionary')\n\n    group_dict[group.name.split('/')[-1]] = dict(group.attrs)\n    for key in group.keys():\n        if isinstance(group[key], h5py.Group):\n            h5_group_to_dict(group[key], group_dict[group.name.split('/')[-1]])\n    return group_dict\n\n\ndef open_file(filename=None, h5_group=None, write_hdf_file=False):  # save_file=False,\n    \"\"\"Opens a file if the extension is .hf5, .ndata, .dm3 or .dm4\n\n    If no filename is provided the QT open-file window opens (if Qt_available == True).\n    Everything will be stored in a NSID style hf5 file.\n    Subroutines used:\n        - NSIDReader\n        - nsid.write_\n        - get_main_tags\n        - get_additional tags\n\n    Parameters\n    ----------\n    filename: str\n        name of file to be opened; if filename is None, a QT file dialog will try to open\n    h5_group: h5py.Group\n        not used yet  # TODO: provide hook for usage of externally chosen group\n    write_hdf_file: bool\n        set to False so that the sidpy dataset will not be written to a hf5-file automatically\n\n    Returns\n    -------\n    sidpy.Dataset\n        sidpy dataset with location of hdf5 dataset as attribute\n\n    \"\"\"\n    if filename is None:\n        filename = open_file_dialog_qt()\n    else:\n        if not isinstance(filename, str):\n            raise TypeError('filename must be a non-empty string or None (to open a QT file dialog)')\n        elif filename == '':\n            raise TypeError('filename must be a non-empty string or None (to open a QT file dialog)')\n\n    path, file_name = os.path.split(filename)\n    basename, extension = os.path.splitext(file_name)\n\n    if extension == '.hf5':\n        reader = SciFiReaders.NSIDReader(filename)\n        datasets = reader.read()\n        if len(datasets) < 1:\n            print('no hdf5 dataset found in file')\n            return {}\n        else:\n            dataset_dict = {}\n            for index, dataset in enumerate(datasets):\n                title = dataset.title.split('/')[2]\n                dataset.title = dataset.title.split('/')[-1]\n                dataset_dict[title] = dataset\n                if index == 0:\n                    file = datasets[0].h5_dataset.file\n                    master_group = datasets[0].h5_dataset.parent.parent.parent\n            for key in master_group.keys():\n                if key not in dataset_dict:\n                    dataset_dict[key] = h5_group_to_dict(master_group[key])\n            if not write_hdf_file:\n                file.close()\n            # datasets[0].h5_dataset = None\n            return dataset_dict\n\n        \"\"\"\n        should go to no dataset found\n        if 'Raw_Data' in h5_group:\n            dataset = read_old_h5group(h5_group)\n            dataset.h5_dataset = h5_group['Raw_Data']\n        \"\"\"\n\n    elif extension in ['.dm3', '.dm4', '.ndata', '.ndata1', '.h5', '.emd', '.emi']:\n\n        # tags = open_file(filename)\n        if extension in ['.dm3', '.dm4']:\n            reader = SciFiReaders.DMReader(filename)\n\n        elif extension in ['.emi']:\n            try:\n                import hyperspy.api as hs\n                s = hs.load(filename)\n                dataset_dict = {}\n                spectrum_number = 0\n                if not isinstance(s, list):\n                    s = [s]\n                for index, datum in enumerate(s):\n                    dset = SciFiReaders.convert_hyperspy(datum)\n                    if datum.data.ndim == 1:\n                        dset.title = dset.title + f'_{spectrum_number}_Spectrum'\n                        spectrum_number += 1\n                    elif datum.data.ndim == 3:\n                        dset.title = dset.title + '_SI'\n                        dset = dset.T\n                        dset.title = 
dset.title[11:]\n dataset_dict[f'Channel_{index:03d}']=dset\n return dataset_dict\n except ImportError:\n print('This file type needs hyperspy to be installed to be able to be read')\n return\n elif extension == '.emd':\n reader = SciFiReaders.EMDReader(filename)\n\n elif extension in ['.ndata', '.h5']:\n reader = SciFiReaders.NionReader(filename)\n\n else:\n raise NotImplementedError('extension not supported')\n\n path, file_name = os.path.split(filename)\n basename, _ = os.path.splitext(file_name)\n if extension != '.emi':\n dset = reader.read()\n\n if extension in ['.dm3', '.dm4']:\n title = (basename.strip().replace('-', '_')).split('/')[-1]\n if not isinstance(dset, list):\n print('Please use new SciFiReaders Package for full functionality')\n dset = [dset]\n if 'PageSetup' in dset[0].original_metadata:\n del dset[0].original_metadata['PageSetup']\n dset[0].original_metadata['original_title'] = title\n\n if isinstance(dset, list):\n if len(dset) < 1:\n print('no dataset found in file')\n return {}\n else:\n dataset_dict = {}\n for index, dataset in enumerate(dset):\n if extension == '.emi':\n if 'experiment' in dataset.metadata:\n if 'detector' in dataset.metadata['experiment']:\n dataset.title = dataset.metadata['experiment']['detector']\n dataset.filename = basename.strip()\n # read_essential_metadata(dataset)\n dataset.metadata['filename'] = filename\n dataset_dict[f'Channel_{index:03}'] = dataset\n else:\n dset.filename = basename.strip().replace('-', '_')\n read_essential_metadata(dset)\n dset.metadata['filename'] = filename\n dataset_dict = {'Channel_000': dset}\n \n if write_hdf_file:\n h5_master_group = save_dataset(dataset_dict, filename=filename)\n\n save_path(filename)\n return dataset_dict\n else:\n print('file type not handled yet.')\n return\n\n\n################################################################\n# Read Functions\n#################################################################\n\ndef read_essential_metadata(dataset):\n \"\"\"Updates dataset.metadata['experiment'] with essential information read from original metadata\n\n This depends on whether it is originally a nion or a dm3 file\n \"\"\"\n if not isinstance(dataset, sidpy.Dataset):\n raise TypeError(\"we need a sidpy.Dataset\")\n experiment_dictionary = {}\n if 'metadata' in dataset.original_metadata:\n if 'hardware_source' in dataset.original_metadata['metadata']:\n experiment_dictionary = read_nion_image_info(dataset.original_metadata)\n if 'DM' in dataset.original_metadata:\n experiment_dictionary = read_dm3_info(dataset.original_metadata)\n if 'experiment' not in dataset.metadata:\n dataset.metadata['experiment'] = {}\n\n dataset.metadata['experiment'].update(experiment_dictionary)\n\n\ndef read_dm3_info(original_metadata):\n \"\"\"Read essential parameter from original_metadata originating from a dm3 file\"\"\"\n if not isinstance(original_metadata, dict):\n raise TypeError('We need a dictionary to read')\n\n if 'DM' not in original_metadata:\n return {}\n if 'ImageTags' not in original_metadata:\n return {}\n exp_dictionary = original_metadata['ImageTags']\n experiment = {}\n if 'EELS' in exp_dictionary:\n if 'Acquisition' in exp_dictionary['EELS']:\n for key, item in exp_dictionary['EELS']['Acquisition'].items():\n if 'Exposure' in key:\n _, units = key.split('(')\n if units[:-1] == 's':\n experiment['single_exposure_time'] = item\n if 'Integration' in key:\n _, units = key.split('(')\n if units[:-1] == 's':\n experiment['exposure_time'] = item\n if 'frames' in key:\n 
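# 'frames' holds the number of read-outs that were summed into this spectrum\n                    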
experiment['number_of_frames'] = item\n\n        if 'Experimental Conditions' in exp_dictionary['EELS']:\n            for key, item in exp_dictionary['EELS']['Experimental Conditions'].items():\n                if 'Convergence' in key:\n                    experiment['convergence_angle'] = item\n                if 'Collection' in key:\n                    # print(item)\n                    # for val in item.values():\n                    experiment['collection_angle'] = item\n        if 'number_of_frames' not in experiment:\n            experiment['number_of_frames'] = 1\n        if 'exposure_time' not in experiment:\n            if 'single_exposure_time' in experiment:\n                experiment['exposure_time'] = experiment['number_of_frames'] * experiment['single_exposure_time']\n\n    else:\n        if 'Acquisition' in exp_dictionary:\n            if 'Parameters' in exp_dictionary['Acquisition']:\n                if 'High Level' in exp_dictionary['Acquisition']['Parameters']:\n                    if 'Exposure (s)' in exp_dictionary['Acquisition']['Parameters']['High Level']:\n                        experiment['exposure_time'] = exp_dictionary['Acquisition']['Parameters']['High Level'][\n                            'Exposure (s)']\n\n    if 'Microscope Info' in exp_dictionary:\n        if 'Microscope' in exp_dictionary['Microscope Info']:\n            experiment['microscope'] = exp_dictionary['Microscope Info']['Microscope']\n        if 'Voltage' in exp_dictionary['Microscope Info']:\n            experiment['acceleration_voltage'] = exp_dictionary['Microscope Info']['Voltage']\n\n    return experiment\n\n\ndef read_nion_image_info(original_metadata):\n    \"\"\"Read essential parameters from original_metadata originating from a Nion file\"\"\"\n    if not isinstance(original_metadata, dict):\n        raise TypeError('We need a dictionary to read')\n    if 'metadata' not in original_metadata:\n        return {}\n    if 'hardware_source' not in original_metadata['metadata']:\n        return {}\n    if 'ImageScanned' not in original_metadata['metadata']['hardware_source']:\n        return {}\n\n    exp_dictionary = original_metadata['metadata']['hardware_source']['ImageScanned']\n    experiment = exp_dictionary\n    # the ImageScanned dictionary already carries the required keys, so pass it on\n    return experiment\n\n\ndef get_h5_filename(fname):\n    \"\"\"Determines file name of hdf5 file for newly converted data file\"\"\"\n\n    path, filename = os.path.split(fname)\n    basename, extension = os.path.splitext(filename)\n    h5_file_name_original = os.path.join(path, basename + '.hf5')\n    h5_file_name = h5_file_name_original\n\n    if os.path.exists(os.path.abspath(h5_file_name_original)):\n        count = 1\n        h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'\n        while os.path.exists(os.path.abspath(h5_file_name)):\n            count += 1\n            h5_file_name = h5_file_name_original[:-4] + '-' + str(count) + '.hf5'\n\n    if h5_file_name != h5_file_name_original:\n        path, filename = os.path.split(h5_file_name)\n        print('Cannot overwrite file. 
Using: ', filename)\n    return str(h5_file_name)\n\n\ndef get_start_channel(h5_file):\n    \"\"\" Legacy for get start channel\"\"\"\n\n    import warnings\n    warnings.warn('Deprecated: use function get_main_channel instead', DeprecationWarning)\n    return get_main_channel(h5_file)\n\n\ndef get_main_channel(h5_file):\n    \"\"\"Returns the first channel group in a hdf5-file\"\"\"\n\n    current_channel = None\n    if 'Measurement_000' in h5_file:\n        if 'Measurement_000/Channel_000' in h5_file:\n            current_channel = h5_file['Measurement_000/Channel_000']\n    return current_channel\n\n\ndef h5_tree(input_object):\n    \"\"\"Just a wrapper for the sidpy function print_tree,\n\n    so that sidpy does not have to be loaded in the notebook\n\n    \"\"\"\n\n    if isinstance(input_object, sidpy.Dataset):\n        if not isinstance(input_object.h5_dataset, h5py.Dataset):\n            raise ValueError('sidpy dataset does not have an associated h5py dataset')\n        h5_file = input_object.h5_dataset.file\n    elif isinstance(input_object, h5py.Dataset):\n        h5_file = input_object.file\n    elif isinstance(input_object, (h5py.Group, h5py.File)):\n        h5_file = input_object\n    else:\n        raise TypeError('should be a h5py object or sidpy Dataset')\n    sidpy.hdf_utils.print_tree(h5_file)\n\n\ndef log_results(h5_group, dataset=None, attributes=None):\n    \"\"\"Log Results in hdf5-file\n\n    Saves either a sidpy.Dataset or a dictionary in a hdf5-file.\n    The group for the result will consist of 'Log_' and a running index.\n    That group will be placed in h5_group.\n\n    Parameters\n    ----------\n    h5_group: h5py.Group, or sidpy.Dataset\n        group where the result group is to be stored\n    dataset: sidpy.Dataset or None\n        sidpy dataset to be stored\n    attributes: dict\n        dictionary containing results that are not based on a sidpy.Dataset\n\n    Returns\n    -------\n    log_group: h5py.Group\n        group in hdf5 file with results.\n\n    \"\"\"\n    if isinstance(h5_group, sidpy.Dataset):\n        h5_group = h5_group.h5_dataset\n        if not isinstance(h5_group, h5py.Dataset):\n            raise TypeError('the h5_dataset attribute of the sidpy.Dataset is not a valid h5py.Dataset')\n        h5_group = h5_group.parent.parent\n\n    if not isinstance(h5_group, h5py.Group):\n        raise TypeError('Need a valid h5py.Group for logging results')\n\n    if dataset is None:\n        log_group = sidpy.hdf.prov_utils.create_indexed_group(h5_group, 'Log_')\n    else:\n        log_group = pyNSID.hdf_io.write_results(h5_group, dataset=dataset)\n        if hasattr(dataset, 'meta_data'):\n            if 'analysis' in dataset.meta_data:\n                log_group['analysis'] = dataset.meta_data['analysis']\n        if hasattr(dataset, 'structures'):\n            for structure in dataset.structures.values():\n                h5_add_crystal_structure(log_group, structure)\n\n        dataset.h5_dataset = log_group[dataset.title.replace('-', '_')][dataset.title.replace('-', '_')]\n    if attributes is not None:\n        for key, item in attributes.items():\n            if not isinstance(item, dict):\n                log_group[key] = attributes[key]\n            else:\n                log_group.create_group(key)\n                sidpy.hdf.hdf_utils.write_simple_attrs(log_group[key], attributes[key])\n    return log_group\n\n\ndef add_dataset_from_file(datasets, filename=None, key_name='Log', single_dataset=True):\n    \"\"\"Add dataset to datasets dictionary\n\n    Parameters\n    ----------\n    datasets: dict\n        dictionary of sidpy.Datasets to which the new datasets are added\n    filename: str, default: None\n        name of file to open; if None, a dialog will appear\n    key_name: str, default: 'Log'\n        name for the dictionary key, with a running number appended\n    single_dataset: bool, default: True\n        if True, only the first dataset of the file is added\n\n    Returns\n    -------\n    key_name: str\n        actual last used name of dictionary key\n    \"\"\"\n\n    datasets2 = open_file(filename=filename)\n    first_dataset = datasets2[list(datasets2)[0]]\n    if 
isinstance(first_dataset, sidpy.Dataset):\n \n index = 0\n for key in datasets.keys():\n if key_name in key:\n if int(key[-3:]) >= index:\n index = int(key[-3:])+1\n if single_dataset:\n datasets[key_name+f'_{index:03}'] = first_dataset\n else:\n for dataset in datasets2.values():\n datasets[key_name+f'_{index:03}'] = dataset\n index += 1\n index -= 1\n else:\n return None \n\n return f'{key_name}_{index:03}'\n\n\n# ##\n# Crystal Structure Read and Write\n# ##\ndef read_poscar(file_name=None):\n \"\"\"\n Open a POSCAR file from Vasp\n If no file name is provided an open file dialog to select a POSCAR file appears\n\n Parameters\n ----------\n file_name: str\n if None is provided an open file dialog will appear\n\n Return\n ------\n crystal: ase.Atoms\n crystal structure in ase format\n \"\"\"\n\n if file_name is None:\n file_name = open_file_dialog_qt('POSCAR (POSCAR*.txt);;All files (*)')\n\n # use ase package to read file\n base = os.path.basename(file_name)\n base_name = os.path.splitext(base)[0]\n crystal = ase.io.read(file_name, format='vasp', parallel=False)\n\n # make dictionary and plot structure (not essential for further notebook)\n crystal.info = {'title': base_name}\n return crystal\n\n\ndef read_cif(file_name=None, verbose=False): # open file dialog to select cif file\n \"\"\"\n Open a cif file\n If no file name is provided an open file dialog to select a cif file appears\n\n Parameters\n ----------\n file_name: str\n if None is provided an open file dialog will appear\n verbose: bool\n\n Return\n ------\n crystal: ase.Atoms\n crystal structure in ase format\n \"\"\"\n\n if file_name is None:\n file_name = open_file_dialog_qt('cif (*.cif);;All files (*)')\n # use ase package to read file\n\n base = os.path.basename(file_name)\n base_name = os.path.splitext(base)[0]\n crystal = ase.io.read(file_name, format='cif', store_tags=True, parallel=False)\n\n # make dictionary and plot structure (not essential for further notebook)\n if crystal.info is None:\n crystal.info = {'title': base_name}\n crystal.info.update({'title': base_name})\n if verbose:\n print('Opened cif file for ', crystal.get_chemical_formula())\n\n return crystal\n\n\ndef h5_add_crystal_structure(h5_file, input_structure, name=None):\n \"\"\"Write crystal structure to NSID file\"\"\"\n\n if isinstance(input_structure, ase.Atoms):\n\n crystal_tags = pyTEMlib.crystal_tools.get_dictionary(input_structure)\n if crystal_tags['metadata'] == {}:\n crystal_tags['metadata'] = {'title': input_structure.get_chemical_formula()}\n elif isinstance(input_structure, dict):\n crystal_tags = input_structure\n else:\n raise TypeError('Need a dictionary or an ase.Atoms object with ase installed')\n\n structure_group = sidpy.hdf.prov_utils.create_indexed_group(h5_file, 'Structure_')\n\n for key, item in crystal_tags.items():\n if not isinstance(item, dict):\n structure_group[key] = item\n\n if 'base' in crystal_tags:\n structure_group['relative_positions'] = crystal_tags['base']\n if 'title' in crystal_tags:\n structure_group['title'] = str(crystal_tags['title'])\n structure_group['_' + crystal_tags['title']] = str(crystal_tags['title'])\n\n # ToDo: Save all of info dictionary\n if 'metadata' in input_structure:\n structure_group.create_group('metadata')\n sidpy.hdf.hdf_utils.write_simple_attrs(structure_group['metadata'], input_structure['metadata'])\n\n h5_file.file.flush()\n return structure_group\n\n\ndef h5_add_to_structure(structure_group, crystal_tags):\n \"\"\"add dictionary as structure group\"\"\"\n\n for key in 
crystal_tags:\n if key in structure_group.keys():\n print(key, ' not written; use new name')\n else:\n structure_group[key] = crystal_tags[key]\n\n\ndef h5_get_crystal_structure(structure_group):\n \"\"\"Read crystal structure from NSID file\n Any additional information will be read as dictionary into the info attribute of the ase.Atoms object\n\n Parameters\n ----------\n structure_group: h5py.Group\n location in hdf5 file to where the structure information is stored\n\n Returns\n -------\n atoms: ase.Atoms object\n crystal structure in ase format\n\n \"\"\"\n\n crystal_tags = {'unit_cell': structure_group['unit_cell'][()],\n 'base': structure_group['relative_positions'][()],\n 'title': structure_group['title'][()]}\n if '2D' in structure_group:\n crystal_tags['2D'] = structure_group['2D'][()]\n elements = structure_group['elements'][()]\n crystal_tags['elements'] = []\n for e in elements:\n crystal_tags['elements'].append(e.astype(str, copy=False))\n\n atoms = pyTEMlib.crystal_tools.atoms_from_dictionary(crystal_tags)\n if 'metadata' in structure_group:\n atoms.info = sidpy.hdf.hdf_utils.h5_group_to_dict(structure_group)\n\n if 'zone_axis' in structure_group:\n atoms.info = {'experiment': {'zone_axis': structure_group['zone_axis'][()]}}\n # ToDo: Read all of info dictionary\n return atoms\n\n\n###############################################\n# Support old pyTEM file format\n###############################################\n\ndef read_old_h5group(current_channel):\n \"\"\"Make a sidpy.Dataset from pyUSID style hdf5 group\n\n Parameters\n ----------\n current_channel: h5_group\n\n Returns\n -------\n sidpy.Dataset\n \"\"\"\n\n dim_dir = []\n if 'nDim_Data' in current_channel:\n h5_dataset = current_channel['nDim_Data']\n reader = pyNSID.NSIDReader(h5_dataset.file.filename)\n dataset = reader.read(h5_dataset)\n dataset.h5_file = current_channel.file\n return dataset\n elif 'Raw_Data' in current_channel:\n if 'image_stack' in current_channel:\n sid_dataset = sidpy.Dataset.from_array(np.swapaxes(current_channel['image_stack'][()], 2, 0))\n dim_dir = ['SPATIAL', 'SPATIAL', 'TEMPORAL']\n elif 'data' in current_channel:\n sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])\n dim_dir = ['SPATIAL', 'SPATIAL']\n else:\n size_x = int(current_channel['spatial_size_x'][()])\n size_y = int(current_channel['spatial_size_y'][()])\n if 'spectral_size_x' in current_channel:\n size_s = int(current_channel['spectral_size_x'][()])\n else:\n size_s = 0\n data = np.reshape(current_channel['Raw_Data'][()], (size_x, size_y, size_s))\n sid_dataset = sidpy.Dataset.from_array(data)\n if size_x > 1:\n dim_dir.append('SPATIAL')\n if size_y > 1:\n dim_dir.append('SPATIAL')\n if size_s > 1:\n dim_dir.append('SPECTRAL')\n sid_dataset.h5_dataset = current_channel['Raw_Data']\n\n elif 'data' in current_channel:\n sid_dataset = sidpy.Dataset.from_array(current_channel['data'][()])\n dim_dir = ['SPATIAL', 'SPATIAL']\n sid_dataset.h5_dataset = current_channel['data']\n else:\n return\n\n if 'SPATIAL' in dim_dir:\n if 'SPECTRAL' in dim_dir:\n sid_dataset.data_type = sidpy.DataType.SPECTRAL_IMAGE\n elif 'TEMPORAL' in dim_dir:\n sid_dataset.data_type = sidpy.DataType.IMAGE_STACK\n else:\n sid_dataset.data_type = sidpy.DataType.IMAGE\n else:\n sid_dataset.data_type = sidpy.DataType.SPECTRUM\n\n sid_dataset.quantity = 'intensity'\n sid_dataset.units = 'counts'\n if 'analysis' in current_channel:\n sid_dataset.source = current_channel['analysis'][()]\n\n set_dimensions(sid_dataset, current_channel)\n\n return 
sid_dataset\n\n\ndef set_dimensions(dset, current_channel):\n    \"\"\"Attaches the correct dimensions from the old pyTEMlib style.\n\n    Parameters\n    ----------\n    dset: sidpy.Dataset\n    current_channel: h5py.Group\n    \"\"\"\n    dim = 0\n    if dset.data_type == sidpy.DataType.IMAGE_STACK:\n        dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]), name='frame',\n                                                units='frame', quantity='stack',\n                                                dimension_type='TEMPORAL'))\n        dim += 1\n    if 'IMAGE' in dset.data_type.name:\n\n        if 'spatial_scale_x' in current_channel:\n            scale_x = current_channel['spatial_scale_x'][()]\n        else:\n            scale_x = 1\n        if 'spatial_units' in current_channel:\n            units_x = current_channel['spatial_units'][()]\n            if len(units_x) < 2:\n                units_x = 'pixel'\n        else:\n            units_x = 'generic'\n        if 'spatial_scale_y' in current_channel:\n            scale_y = current_channel['spatial_scale_y'][()]\n        else:\n            scale_y = 1  # fall back to unit scale, like the x axis\n        dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim])*scale_x, name='x',\n                                                units=units_x, quantity='Length',\n                                                dimension_type='SPATIAL'))\n        dim += 1\n        dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim])*scale_y, name='y',\n                                                units=units_x, quantity='Length',\n                                                dimension_type='SPATIAL'))\n        dim += 1\n    if dset.data_type in [sidpy.DataType.SPECTRUM, sidpy.DataType.SPECTRAL_IMAGE]:\n        if 'spectral_scale_x' in current_channel:\n            scale_s = current_channel['spectral_scale_x'][()]\n        else:\n            scale_s = 1.0\n        if 'spectral_units_x' in current_channel:\n            units_s = current_channel['spectral_units_x']\n        else:\n            units_s = 'eV'\n\n        if 'spectral_offset_x' in current_channel:\n            offset = current_channel['spectral_offset_x']\n        else:\n            offset = 0.0\n        dset.set_dimension(dim, sidpy.Dimension(np.arange(dset.shape[dim]) * scale_s + offset,\n                                                name='energy',\n                                                units=units_s,\n                                                quantity='energy_loss',\n                                                dimension_type='SPECTRAL'))\n","sub_path":"pyTEMlib/file_tools.py","file_name":"file_tools.py","file_ext":"py","file_size_in_byte":46102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"80408477","text":"from __future__ import unicode_literals, print_function\nimport sys\nimport codecs\nimport json\nimport uuid\nfrom protolib.python import document_pb2, edgRules_pb2\nfrom utils.helper import DocHelper\nimport shortuuid\nfrom collections import defaultdict, namedtuple\nimport re\n\nclass EdgArg(object):\n    def __init__(self, rule_id, arg_number, edge_name, token_index, head_noun, base_np, np, edg_type):\n        self.rule_id = rule_id\n        self.arg_number = arg_number\n        self.edge_name = edge_name\n        self.token_index = token_index\n        self.head_noun = head_noun\n        self.base_np = base_np\n        self.np = np\n        self.edg_type = edg_type\n\n    def __repr__(self):\n        return 'EdgArg(' + self.rule_id + \", \" + self.arg_number + \", \" + self.head_noun + \\\n               \", \" + self.np + \")\" + \"\\n\"\n\nclass EdgRelation(object):\n    def __init__(self, name, trigger_index, trigger_head, trigger_phrase, args):\n        self.name = name\n        self.trigger_index = trigger_index\n        self.trigger_head = trigger_head\n        self.trigger_phrase = trigger_phrase\n        self.args = args\n        self.arg0s = list()\n        self.arg1s = list()\n        self.arg2s = list()\n\n    def __repr__(self):\n        return 'EdgRelation(' + self.name + \", \" + self.trigger_phrase + \"\\n\" + \\\n               \"ARG0s :\" + repr(self.arg0s) + \"\\n\\n\" + \\\n               \"ARG1s :\" + repr(self.arg1s) + \"\\n\\n\" + \\\n               \"ARG2s :\" + repr(self.arg2s) + \"\\n\\n\"\n\n    def getEdgRelationNumArgsIndex(self):\n        arg0s_set = set()\n        arg1s_set = set()\n        arg2s_set = set()\n        for arg in self.arg0s:\n            arg_key = arg.token_index\n            
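# keying the set by token index collapses duplicate mentions of the same argument\n            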
arg0s_set.add(arg_key)\n for arg in self.arg1s:\n arg_key = arg.token_index\n arg1s_set.add(arg_key)\n for arg in self.arg2s:\n arg_key = arg.token_index\n arg2s_set.add(arg_key)\n if not arg0s_set:\n arg0s_set.add(\"NA\")\n if not arg1s_set:\n arg1s_set.add(\"NA\")\n if not arg2s_set:\n arg2s_set.add(\"NA\")\n\n numb_args_list = list()\n numb_args_list = [[a0,a1,a2] for a0 in arg0s_set for a1 in arg1s_set for a2 in arg2s_set]\n \n return numb_args_list\n \n def getEdgRelationNumArgs(self):\n arg0s_set = set()\n arg1s_set = set()\n arg2s_set = set()\n for arg in self.arg0s:\n arg_key = arg.head_noun+\"\\t\"+arg.base_np+\"\\t\"+arg.np\n arg0s_set.add(arg_key)\n for arg in self.arg1s:\n arg_key = arg.head_noun+\"\\t\"+arg.base_np+\"\\t\"+arg.np\n arg1s_set.add(arg_key)\n for arg in self.arg2s:\n arg_key = arg.head_noun+\"\\t\"+arg.base_np+\"\\t\"+arg.np\n arg2s_set.add(arg_key)\n if not arg0s_set:\n arg0s_set.add(\"NA\\tNA\\tNA\")\n if not arg1s_set:\n arg1s_set.add(\"NA\\tNA\\tNA\")\n if not arg2s_set:\n arg2s_set.add(\"NA\\tNA\\tNA\")\n\n numb_args_list = list()\n numb_args_list = [[a0,a1,a2] for a0 in arg0s_set for a1 in arg1s_set for a2 in arg2s_set]\n \n return numb_args_list \n #toPrintRel = [\"inv\", \"reg\", \"ass\", \"exp\" , \"cmp\"]\n #if self.name in toPrintRel:\n # print (\"Relation Name: \"+self.name)\n # print (\"Trigger: \"+self.trigger_head+\"\\t\"+self.trigger_phrase+\"\\n\")\n # for numb_args in numb_args_list:\n # print (\"Arg0: \"+numb_args[0])\n # print (\"Arg1: \"+numb_args[1])\n # print (\"Arg2: \"+numb_args[2])\n # print (\"\\n\")\n\n\n def populateNumberedArgs(self):\n for arg in self.args:\n arg_number = arg.arg_number\n if arg_number == \"arg0\":\n self.arg0s.append(arg)\n elif arg_number == \"arg1\":\n self.arg1s.append(arg)\n elif arg_number == \"arg2\":\n self.arg2s.append(arg)\n else:\n arg_error = arg\n\nclass EdgRelations(object):\n def __init__(self, doc_id, sent_id):\n self.doc_id = doc_id\n self.sent_id = sent_id\n self.relations = list() \n\n def __repr__(self):\n return str(self.doc_id) + \"-\" + str(self.sent_id) + \"\\n\" + repr(self.relations)\n\n\n\n def setRelations(self, doc_helper, sentence, dependencies):\n doc_id = self.doc_id\n sent_id = self.sent_id\n relation_dict = {}\n vp_labels = [\"VP\", \"ADVP\", \"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\"]\n for dependency in dependencies:\n gov_index = dependency.gov_index\n dep_index = dependency.dep_index\n edge_name = dependency.relation\n rule_id = dependency.rule_id\n \n gov_head = doc_helper.doc.token[gov_index].word \n #gov_next_head = doc_helper.doc.token[gov_index+1].word \n #gov_next_next_head = doc_helper.doc.token[gov_index+2].word \n dep_head = doc_helper.doc.token[dep_index].word\n gov_pos = doc_helper.doc.token[gov_index].pos \n dep_pos = doc_helper.doc.token[dep_index].pos\n\n #pos_ignore_list = [\"JJR\", \"JJ\"]\n #es_ignore_list = [\"than\", \"versus\", \"vs.\"]\n #jjr_ruleid_igore_list = [\"cmp1_than_2\", \"cmp1_than_1\", \"cmp1_vs_1\"]\n #dt_not_ignore_list = [\"that\", \"those\", \"this\"]\n #if gov_pos in pos_ignore_list and gov_next_head in es_ignore_list and rule_id in jjr_ruleid_igore_list and gov_next_next_head not in dt_not_ignore_list:\n # #print (\"Samir: \" + gov_head + \"\\t\" + dep_head + \"\\t\" + edge_name + \"\\t\" + rule_id)\n # continue \n gov_np = \"NA\"\n if gov_pos in vp_labels:\n gov_np = doc_helper.getTokenVG(sentence, gov_index)\n else:\n gov_np = doc_helper.getTokenNP(sentence, gov_index)\n dep_np = \"NA\" \n if dep_pos in vp_labels:\n dep_np = 
doc_helper.getTokenVG(sentence, dep_index)\n            else:\n                dep_np = doc_helper.getTokenNP(sentence, dep_index)\n            dep_base_np = doc_helper.getTokenBaseNP(sentence, dep_index)\n            #print (\"Samir: \" + gov_head + \"\\t\" + dep_head + \"\\t\" + edge_name + \"\\t\" + rule_id)\n            relName = \"NA\"\n            arg_number = \"arg3\"\n            if re.search(r'arg[0-9]+_', edge_name):\n                tokens = edge_name.split(\"_\")\n                relName = tokens[1]\n                arg_number = tokens[0]\n            #new_trigger = TriggerTuple(rel_name = relName, trigger_index = gov_index)\n            new_trigger = relName + \"\\t\" + str(gov_index)\n            if new_trigger in relation_dict:\n                relation = relation_dict[new_trigger]\n                new_arg = EdgArg(rule_id, arg_number, edge_name, dep_index, dep_head, dep_base_np, dep_np, \"NONE\")\n                relation.args.append(new_arg)\n            else:\n                new_arg = EdgArg(rule_id, arg_number, edge_name, dep_index, dep_head, dep_base_np, dep_np, \"NONE\")\n                relation = EdgRelation(relName, gov_index, gov_head, gov_np, [new_arg])\n                self.relations.append(relation)\n                relation_dict[new_trigger] = relation\n\n        for relation in self.relations:\n            relation.populateNumberedArgs()\n","sub_path":"utils/edg_relations/edg_relations.py","file_name":"edg_relations.py","file_ext":"py","file_size_in_byte":7331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"2295024","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 19 20:11:31 2019\r\n\r\n@author: Oguz\r\n\"\"\"\r\n\r\n# All functions are explained in detail in the 'documentation.txt' file\r\ndef idBulma(ilacIsim):  # looks up a drug's database id from its name\r\n    ilacID = ilacVeriTabani.execute(\"SELECT id FROM ilaclar WHERE isim=?\", (ilacIsim,)).fetchall()\r\n    return(ilacID[0][0])\r\n\r\ndef isimBulma(ilacID):  # looks up a drug's name from its database id\r\n    ilacIsim = ilacVeriTabani.execute(\"SELECT isim FROM ilaclar WHERE id=?\", (ilacID,)).fetchall()\r\n    return(ilacIsim[0][0])\r\n\r\ndef TCdenID(TC):  # looks up a patient's database id from their Turkish national identity number\r\n    hastaID = hastaVeriTabani.execute(\"SELECT id FROM hastalar WHERE TC=?\", (TC,)).fetchall()\r\n    return(hastaID[0][0])\r\n\r\ndef kullanma(ilacID):  # returns the ids of drugs that must not be taken together with the drug whose id is ilacID\r\n    ilac = ilacVeriTabani.execute(\"SELECT kullanma FROM ilaclar\").fetchall()\r\n    ilacStr = \"\".join(ilac[ilacID-1][0])\r\n    ilacStr = ilacStr.split(\",\")\r\n    return(ilacStr)\r\n\r\ndef ilacBilgi(ilacID):  # retrieves a drug's information from the database\r\n    isim = ilacVeriTabani.execute(\"SELECT isim FROM ilaclar\").fetchall()\r\n    tag = ilacVeriTabani.execute(\"SELECT tag FROM ilaclar\").fetchall()\r\n    antibiyotik = ilacVeriTabani.execute(\"SELECT antibiyotik FROM ilaclar\").fetchall()\r\n    agri = ilacVeriTabani.execute(\"SELECT agri FROM ilaclar\").fetchall()\r\n    antidep = ilacVeriTabani.execute(\"SELECT antidep FROM ilaclar\").fetchall()\r\n    dogum = ilacVeriTabani.execute(\"SELECT dogum FROM ilaclar\").fetchall()\r\n    inceltici = ilacVeriTabani.execute(\"SELECT inceltici FROM ilaclar\").fetchall()\r\n    NSAID = ilacVeriTabani.execute(\"SELECT NSAID FROM ilaclar\").fetchall()\r\n    bobrek = ilacVeriTabani.execute(\"SELECT bobrek FROM ilaclar\").fetchall()\r\n    hamile = ilacVeriTabani.execute(\"SELECT hamile FROM ilaclar\").fetchall()\r\n    kalp = ilacVeriTabani.execute(\"SELECT kalp FROM ilaclar\").fetchall()\r\n    diyabet = 
ilacVeriTabani.execute(\"SELECT diyabet FROM ilaclar\").fetchall()\r\n    hemo = ilacVeriTabani.execute(\"SELECT hemo FROM ilaclar\").fetchall()\r\n    aciklama = ilacVeriTabani.execute(\"SELECT aciklama FROM ilaclar\").fetchall()\r\n    return(isim[ilacID-1][0], tag[ilacID-1][0], kullanma(ilacID), antibiyotik[ilacID-1][0], agri[ilacID-1][0], antidep[ilacID-1][0], dogum[ilacID-1][0], inceltici[ilacID-1][0],\r\n           NSAID[ilacID-1][0], bobrek[ilacID-1][0], hamile[ilacID-1][0], kalp[ilacID-1][0], diyabet[ilacID-1][0], hemo[ilacID-1][0], aciklama[ilacID-1][0])\r\n\r\ndef ilacIsimleri():  # returns the names of all drugs in the database\r\n    isimDatabase = ilacVeriTabani.execute(\"SELECT isim FROM ilaclar\").fetchall()\r\n    isimler = []\r\n    for i in range(0, len(isimDatabase)):\r\n        isimler.append(isimDatabase[i][0])\r\n    return(isimler)\r\n\r\ndef kullanilan(hastaID):  # returns the ids of the drugs the patient is currently taking\r\n    hasta = hastaVeriTabani.execute(\"SELECT kullanilan FROM hastalar\").fetchall()\r\n    hastaStr = \"\".join(hasta[hastaID-1][0])\r\n    hastaStr = hastaStr.split(\",\")\r\n    return(hastaStr)\r\n\r\ndef hastaBilgi(hastaID):  # retrieves a patient's information from the database\r\n    isim = hastaVeriTabani.execute(\"SELECT isim FROM hastalar\").fetchall()\r\n    TC = hastaVeriTabani.execute(\"SELECT TC FROM hastalar\").fetchall()\r\n    bobrek = hastaVeriTabani.execute(\"SELECT bobrek FROM hastalar\").fetchall()\r\n    hamile = hastaVeriTabani.execute(\"SELECT hamile FROM hastalar\").fetchall()\r\n    kalp = hastaVeriTabani.execute(\"SELECT kalp FROM hastalar\").fetchall()\r\n    diyabet = hastaVeriTabani.execute(\"SELECT diyabet FROM hastalar\").fetchall()\r\n    hemo = hastaVeriTabani.execute(\"SELECT hemo FROM hastalar\").fetchall()\r\n    return(isim[hastaID-1][0], TC[hastaID-1][0], kullanilan(hastaID), bobrek[hastaID-1][0], hamile[hastaID-1][0], kalp[hastaID-1][0], diyabet[hastaID-1][0], hemo[hastaID-1][0])\r\n\r\n\r\ndef ilacKontrol(TCKN, ilacIsim):  # checks whether there is any problem with the patient taking a given drug\r\n    sinirlama = []\r\n    hastaID = TCdenID(TCKN)\r\n    ilacID = idBulma(ilacIsim)\r\n    for i in range(0, len(kullanilan(hastaID))):\r\n        for j in range(0, len(kullanma(ilacID))):\r\n            if(kullanilan(hastaID)[i] == kullanma(ilacID)[j]):\r\n                if isimBulma(kullanilan(hastaID)[i])==ilacIsim:\r\n                    sinirlama.append(\"Bu hasta zaten \"+ilacIsim+\" kullanıyor.\")\r\n                else:\r\n                    sinirlama.append(isimBulma(kullanilan(hastaID)[i]) + \" ve \" + ilacIsim + \" birlikte kullanılamaz.\")\r\n\r\n    for l in range(0, len(kullanilan(hastaID))):\r\n        try:\r\n            ilacTipi = ilacBilgi(int(kullanilan(hastaID)[l]))[1] + 2\r\n        except ValueError:\r\n            continue\r\n        if(ilacTipi == 3):\r\n            sinirlama.append(ilacIsim + \", antibiyotiklerle kullanılamaz.\")\r\n        elif(ilacTipi == 4):\r\n            sinirlama.append(ilacIsim + \", ağrı kesicilerle kullanılamaz.\")\r\n        elif(ilacTipi == 5):\r\n            sinirlama.append(ilacIsim + \", antidepresanlarla kullanılamaz.\")\r\n        elif(ilacTipi == 6):\r\n            sinirlama.append(ilacIsim + \", doğum kontrol haplarıyla kullanılamaz.\")\r\n        elif(ilacTipi == 7):\r\n            sinirlama.append(ilacIsim + \", kan incelticilerle kullanılamaz.\")\r\n        elif(ilacTipi == 8):\r\n            sinirlama.append(ilacIsim + \", anti-inflamatörlerle kullanılamaz.\")\r\n\r\n    for m in range(0, 5):\r\n        if(m == 1 and hastaBilgi(hastaID)[m + 3] == 1):\r\n            sinirlama.append(\"Bu ilaç gebeliğin \" + str(ilacBilgi(ilacID)[m + 9]) + \". 
ayına kadar kullanılabilir.\")\r\n else:\r\n if(hastaBilgi(hastaID)[m + 3] == 1 and ilacBilgi(ilacID)[m + 9]):\r\n if(m == 0):\r\n sinirlama.append(\"Böbrek yetmezliği durumlarında \" + ilacBilgi(ilacID)[0] + \" kullanılamaz.\")\r\n elif(m == 2):\r\n sinirlama.append(\"Kalp damar rahatsızlıkları durumlarında \" + ilacBilgi(ilacID)[0] + \" kullanılamaz.\")\r\n elif(m == 3):\r\n sinirlama.append(\"Diyabet durumlarında \" + ilacBilgi(ilacID)[0] + \" kullanılamaz.\")\r\n elif(m == 4):\r\n sinirlama.append(\"Hemofili durumlarında \" + ilacBilgi(ilacID)[0] + \" kullanılamaz.\")\r\n \r\n \r\n return(sinirlama)\r\n \r\n\r\n#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\r\n\r\nimport sqlite3\r\n\r\nilacBaglanti = sqlite3.connect(\"ilaclar.db\" ,check_same_thread=False)\r\nhastaBaglanti = sqlite3.connect(\"hastalar.db\" ,check_same_thread=False)\r\n\r\nif(ilacBaglanti):\r\n print('Ilaclar baglantisi basarili!')\r\nelse:\r\n print('Ilaclar baglantisi basarisiz!')\r\n \r\nif(hastaBaglanti):\r\n print('Hastalar baglantisi basarili!')\r\nelse:\r\n print('Hastalar baglantisi basarisiz!')\r\n \r\n \r\n \r\nilacVeriTabani = ilacBaglanti.cursor()\r\nhastaVeriTabani = hastaBaglanti.cursor()\r\n\r\nilacKontrol(30909671126, \"Ibuprofen\")\r\ndef exit():\r\n ilacBaglanti.commit()\r\n ilacBaglanti.close()\r\n\r\n hastaBaglanti.commit()\r\n hastaBaglanti.close()\r\n\r\n","sub_path":"devamidb.py","file_name":"devamidb.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"268845720","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nclass GeneratorLoss(nn.Module):\n def __init__(self, vgg_network, writer, steps):\n super(GeneratorLoss, self).__init__()\n self.vgg_network = vgg_network\n # self.dis_network = dis_network\n self.writer = writer\n self.steps = steps\n self.mse_loss = nn.MSELoss().cuda()\n self.bce_loss = nn.BCELoss().cuda()\n self.huber_loss = nn.SmoothL1Loss().cuda()\n \n def forward(self, out_labels, out_images, target_images, opt):\n # self.steps += out_images.shape[0]\n # print(\"Image loss: {}\".format(image_loss.item()))\n\n overall_loss = 0\n self.ones_const = Variable(torch.ones(out_images.size()[0])).cuda()\n\n image_loss = self.huber_loss(out_images, target_images)\n self.writer.add_scalar(\"Image Loss\", image_loss, self.steps)\n overall_loss += opt.mse_loss_coefficient * image_loss\n\n if opt.adversarial_loss:\n adversarial_loss = self.bce_loss(out_labels, self.ones_const)\n self.writer.add_scalar(\"Gen Adversarial Loss\", adversarial_loss, self.steps)\n overall_loss += opt.adversarial_loss_coefficient*adversarial_loss\n\n if opt.vgg_loss:\n vgg_perception_loss = self.mse_loss(self.vgg_network(out_images), self.vgg_network(target_images))\n self.writer.add_scalar(\"VGG Perception Loss\", vgg_perception_loss, self.steps)\n overall_loss += opt.vgg_loss_coefficient*vgg_perception_loss\n\n return overall_loss","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"381157296","text":"#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.stats as ss\n\nx = [1,2,3,4,5,6,7]\ny = [38.5,132,246.6,389.2,500.7,689,794.6]\nerror = [13.8,25.5,45,44,50,47.7,40]\nplt.figure()\n#print(ss.t.ppf(0.95, data_df)*data_sd)\nline, = plt.plot(x , y ,'o-' , label = 'Flood')\nplt.errorbar(x , y , 
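The ilacBilgi helper above runs fourteen separate SELECTs and fetches every row of the table on each call. A single parameterized query over one row does the same work; a sketch under the same schema assumptions (ilacBilgiHizli is a hypothetical name, and kullanma is kept separate because it post-processes its column):

def ilacBilgiHizli(ilacID):
    # Fetch all needed columns of a single row in one query instead of
    # one full-column scan per attribute.
    row = ilacVeriTabani.execute(
        "SELECT isim, tag, antibiyotik, agri, antidep, dogum, inceltici, "
        "NSAID, bobrek, hamile, kalp, diyabet, hemo, aciklama "
        "FROM ilaclar WHERE id=?", (ilacID,)).fetchone()
    return row[:2] + (kullanma(ilacID),) + row[2:]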
error,fmt='-o')\n# NOTE: the 'Methodology' series below currently re-plots the same x/y data\n# as 'Flood'; it appears to be a placeholder until real measurements exist.\nplt.plot(x , y ,'o-' , label = 'Methodology')\nplt.errorbar(x, y, error,fmt='-o')\nplt.title(\"Methodology vs Flood (received messages)\")\nplt.ylabel(\"Number of messages received\")\nplt.xlabel(\"Number of transmitters\")\nplt.legend()\n\nplt.show()","sub_path":"Initializator(Phython)/graphics/floodcompmessa.py","file_name":"floodcompmessa.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"200744876","text":"# learning.py\r\n# ---------\r\n# Licensing Information: You are free to use or extend these projects for\r\n# educational purposes provided that (1) you do not distribute or publish\r\n# solutions, (2) you retain this notice, and (3) you provide clear\r\n# attribution to Clemson University and the authors.\r\n# \r\n# Authors: Pei Xu (peix@g.clemson.edu) and Ioannis Karamouzas (ioannis@g.clemson.edu)\r\n\r\n\r\n\r\n'''\r\n\r\nTeam Members FOR PROJECT - 5:\r\nSUBBA RAO ILLA (C16280847)\r\nSUNDARESH NARAYANAN (C73923755)\r\n\r\n'''\r\n\r\n\r\n\r\n\"\"\"\r\nIn this assignment, you will implement linear and logistic regression\r\nusing the gradient descent method, as well as the binary perceptron algorithm. \r\nTo complete the assignment, please modify the linear_regression(), binary_perceptron(), \r\nand logistic_regression() functions. \r\n\r\nThe package `matplotlib` is needed for the program to run.\r\nYou should also use the 'numpy' library to vectorize \r\nyour code, enabling a much more efficient implementation of \r\nlinear and logistic regression. You are also free to use the \r\nnative 'math' library of Python. \r\n\r\nAll provided datasets are extracted from the scikit-learn machine learning library. \r\nThese are called `toy datasets`, because they are quite simple and small. \r\nFor more details about the datasets, please see https://scikit-learn.org/stable/datasets/index.html\r\n\r\nEach dataset is randomly split into a training set and a testing set using a ratio of 8 : 2. \r\nYou will use the training set to learn a regression model. Once the training is done, the code\r\nwill automatically validate the fitted model on the testing set. \r\n\"\"\"\r\n\r\n# use math and/or numpy if needed\r\nimport math\r\nimport numpy as np\r\n\r\ndef linear_regression(x, y, logger=None):\r\n \"\"\"\r\n Linear regression using full batch gradient descent.\r\n A 1D array w should be returned by this function such that given a\r\n sample x, a prediction can be obtained by x^T w, where x is a column vector. \r\n The intercept term can be ignored due to that x has been augmented by adding '1' as an extra feature. \r\n You should use as learning rate alpha=0.0001. 
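A quick way to sanity-check the gradient-descent weights that linear_regression computes is to compare them against NumPy's closed-form least-squares solution; a small sketch on toy data (not part of the assignment):

import numpy as np

# Gradient descent on (X, y) should approach the closed-form
# least-squares solution returned by np.linalg.lstsq.
X = np.array([[1.0, 1.0], [2.0, 1.0], [3.0, 1.0]])  # feature plus bias column
y = np.array([2.0, 4.0, 6.0])
w_exact, *_ = np.linalg.lstsq(X, y, rcond=None)
print(w_exact)  # approximately [2. 0.]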
If you scale the cost function by 1/#samples, use alpha=0.001 \r\n\r\n Parameters\r\n ----------\r\n x: a 2D array of size [N, f+1]\r\n where N is the number of samples, f is the number of features\r\n y: a 1D array of size [N]\r\n It contains the target value for each sample in x\r\n logger: a logger instance for plotting the loss\r\n Usage: logger.log(i, loss) where i is the number of iterations\r\n Log updates can be performed every several iterations to improve performance.\r\n \r\n Returns\r\n -------\r\n w: a 1D array\r\n linear regression parameters\r\n \"\"\"\r\n alpha=0.0001\r\n x_array=np.array(x)\r\n y_array=np.array(y)\r\n x_transpose=np.transpose(x_array)\r\n number_of_features=len(x[0]) \r\n w=np.zeros([number_of_features])\r\n for i in range(0,1000):\r\n h=np.dot(x_array,w)\r\n w=w-(alpha*np.matmul(x_transpose,(h-y_array)))\r\n loss=np.square((h-y_array))\r\n loss=np.sum(loss)\r\n loss=(1/2)*loss\r\n if((i+1)%10==0):\r\n logger.log(i+1,loss)\r\n\r\n return w\r\n\r\ndef binary_perceptron(x, y, logger=None):\r\n \"\"\"\r\n Binary classifaction using a perceptron. \r\n A 1D array w should be returned by this function such that given a\r\n sample x, a prediction can be obtained by\r\n h = (x^T w) \r\n with the decision boundary:\r\n h >= 0 => x in class 1\r\n h < 0 => x in class 0\r\n where x is a column vector. \r\n The intercept/bias term can be ignored due to that x has been augmented by adding '1' as an extra feature. \r\n \r\n \r\n Parameters\r\n ----------\r\n x: a 2D array with the shape [N, f+1]\r\n where N is the number of samples, f is the number of features\r\n y: a 1D array with the shape [N]\r\n It is the ground truth value for each sample in x\r\n logger: a logger instance through which plotting loss\r\n Usage: Please do not use the logger in this function.\r\n \r\n Returns\r\n -------\r\n w: a 1D array\r\n binary perceptron parameters\r\n \"\"\"\r\n x_array=np.array(x)\r\n y_array=np.array(y)\r\n number_of_features=len(x[0])\r\n number_of_samples=len(x)\r\n w=np.zeros([number_of_features])\r\n while True:\r\n flag=False\r\n for i in range(0,number_of_samples):\r\n h=np.dot(x_array[i],w)\r\n if(h<0):\r\n y_hat=0\r\n else:\r\n y_hat=1\r\n if(y_hat!=y_array[i]):\r\n flag=True\r\n w=w+(y_array[i]-y_hat)*x_array[i]\r\n if flag==False:\r\n break\r\n return w\r\n\r\n\r\ndef logistic_regression(x, y, logger=None):\r\n \"\"\"\r\n Logistic regression using batch gradient descent.\r\n A 1D array w should be returned by this function such that given a\r\n sample x, a prediction can be obtained by p = sigmoid(x^T w)\r\n with the decision boundary:\r\n p >= 0.5 => x in class 1\r\n p < 0.5 => x in class 0\r\n where x is a column vector. \r\n The intercept/bias term can be ignored due to that x has been augmented by adding '1' as an extra feature. 
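Note that binary_perceptron above only terminates when the data are linearly separable, since its while loop runs until one full error-free pass. A toy run on the (separable) AND function, assuming the function is in scope:

import numpy as np

# AND gate with a bias feature appended; linearly separable, so the
# perceptron loop is guaranteed to terminate.
x = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]], dtype=float)
y = np.array([0, 0, 0, 1])
w = binary_perceptron(x, y)
print((x @ w >= 0).astype(int))  # expected: [0 0 0 1]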
\r\n In gradient descent, you should use as learning rate alpha=0.001 \r\n\r\n Parameters\r\n ----------\r\n x: a 2D array of size [N, f+1]\r\n where N is the number of samples, f is the number of features\r\n y: a 1D array of size [N]\r\n It contains the ground truth label for each sample in x\r\n logger: a logger instance for plotting the loss\r\n Usage: logger.log(i, loss) where i is the number of iterations\r\n Log updates can be performed every several iterations to improve performance.\r\n \r\n Returns\r\n -------\r\n w: a 1D array\r\n logistic regression parameters\r\n \"\"\"\r\n \r\n alpha=0.001\r\n x_array=np.array(x)\r\n y_array=np.array(y)\r\n x_transpose=np.transpose(x_array)\r\n number_of_features=len(x[0])\r\n w=np.zeros([number_of_features])\r\n for i in range(0,1000):\r\n h=np.dot(x_array,w)\r\n p = 1/(1 + np.exp(-h))\r\n w=w-(alpha*np.matmul(x_transpose,(p-y_array)))\r\n loss=(y_array*np.log(p))+((1-y_array)*(np.log(1-p)))\r\n loss=np.sum(loss)\r\n loss=-loss\r\n if((i+1)%10==0):\r\n logger.log(i+1,loss)\r\n\r\n return w\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n import os\r\n import tkinter as tk\r\n from app.regression import App\r\n\r\n import data.load\r\n dbs = {\r\n \"Boston Housing\": (\r\n lambda : data.load(\"boston_house_prices.csv\"),\r\n App.TaskType.REGRESSION\r\n ),\r\n \"Diabetes\": (\r\n lambda : data.load(\"diabetes.csv\", header=0),\r\n App.TaskType.REGRESSION\r\n ),\r\n \"Handwritten Digits\": (\r\n lambda : (data.load(\"digits.csv\", header=0)[0][np.where(np.equal(data.load(\"digits.csv\", header=0)[1], 0) | np.equal(data.load(\"digits.csv\", header=0)[1], 1))],\r\n data.load(\"digits.csv\", header=0)[1][np.where(np.equal(data.load(\"digits.csv\", header=0)[1], 0) | np.equal(data.load(\"digits.csv\", header=0)[1], 1))]),\r\n App.TaskType.BINARY_CLASSIFICATION\r\n ),\r\n \"Breast Cancer\": (\r\n lambda : data.load(\"breast_cancer.csv\"),\r\n App.TaskType.BINARY_CLASSIFICATION\r\n )\r\n }\r\n\r\n algs = {\r\n \"Linear Regression (Batch Gradient Descent)\": (\r\n linear_regression,\r\n App.TaskType.REGRESSION\r\n ),\r\n \"Logistic Regression (Batch Gradient Descent)\": (\r\n logistic_regression,\r\n App.TaskType.BINARY_CLASSIFICATION\r\n ),\r\n \"Binary Perceptron\": (\r\n binary_perceptron,\r\n App.TaskType.BINARY_CLASSIFICATION\r\n )\r\n }\r\n\r\n root = tk.Tk()\r\n App(dbs, algs, root)\r\n tk.mainloop()\r\n","sub_path":"P5_Regression and Binary Perceptron/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"293296751","text":"from django.urls import path\nfrom . 
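One numerical caveat about the loss computed in logistic_regression above: np.log(p) and np.log(1 - p) return -inf once the sigmoid saturates at 0 or 1. Clipping the probabilities is a common guard; a sketch (the epsilon value is a typical choice, not taken from the source):

import numpy as np

def safe_log_loss(y, p, eps=1e-12):
    # Clip probabilities away from exactly 0 and 1 before taking logs,
    # so saturated sigmoids cannot produce -inf terms in the loss.
    p = np.clip(p, eps, 1.0 - eps)
    return -np.sum(y * np.log(p) + (1.0 - y) * np.log(1.0 - p))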
import views\nurlpatterns = [\n path('', views.profile, name='profile'),\n path('mr/', views.mr, name='mr'),\n path('songs/', views.songs, name='songs'),\n path('likes/', views.likes, name='likes'),\n path('followings/', views.followings, name='followings'),\n path('followers/', views.followers, name='followers'),\n path('comments/', views.comments, name='comments'),\n path('track_detail/', views.track_detail, name='track_detail'),\n]","sub_path":"camp4_triples-gigibean1/profilepage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"646453940","text":"#!/usr/bin/env runaiida\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n__copyright__ = (u'Copyright (c), 2016, Forschungszentrum Jülich GmbH, ' 'IAS-1/PGI-1, Germany. All rights reserved.')\n__license__ = 'MIT license, see LICENSE.txt file'\n__version__ = '0.27'\n__contributors__ = 'Jens Broeder'\n\nfrom aiida import load_dbenv, is_dbenv_loaded\nif not is_dbenv_loaded():\n load_dbenv()\n\nimport sys\nimport os\n\nfrom aiida.common.example_helpers import test_and_get_code\nfrom aiida.plugins import DataFactory\n\n# If set to True, will ask AiiDA to run in serial mode (i.e., AiiDA will not\n# invoke the mpirun command in the submission script)\nrun_in_serial_mode = True #False\n\n################################################################\n\nParameterData = DataFactory('parameter')\nStructureData = DataFactory('structure')\nFleurinpData = DataFactory('fleur.fleurinp')\ntry:\n dontsend = sys.argv[1]\n if dontsend == '--dont-send':\n submit_test = True\n elif dontsend == '--send':\n submit_test = False\n else:\n raise IndexError\nexcept IndexError:\n print(('The first parameter can only be either ' '--send or --dont-send'), file=sys.stderr)\n sys.exit(1)\n\ntry:\n codename = sys.argv[2]\nexcept IndexError:\n codename = None\n\nqueue = None\n# queue = \"th1_small\"\nsettings = None\n#####\n\ncode = test_and_get_code(codename, expected_code_type='fleur_inp.fleur')\n\n#TODO: how to make smart path?\n# get where tests folder is, then relative path\ninpxmlfile = '/usr/users/iff_th1/broeder/aiida/github/aiida_fleur_plugin/tests/inp_xml_files/PTO-SOCXML/files/inp.xml'\nenpara = '/usr/users/iff_th1/broeder/aiida/github/aiida_fleur_plugin/tests/inp_xml_files/PTO-SOCXML/files/enpara'\nsymout = '/usr/users/iff_th1/broeder/aiida/github/aiida_fleur_plugin/tests/inp_xml_files/PTO-SOCXML/files/sym.out'\n\nfleurinp = FleurinpData(files=[inpxmlfile, enpara, symout])\nprint(fleurinp.files)\n## For remote codes, it is not necessary to manually set the computer,\n## since it is set automatically by new_calc\n#computer = code.get_remote_computer()\n#calc = code.new_calc(computer=computer)\n\ncalc = code.new_calc()\ncalc.label = 'PTO-SOCXML Fleur test'\ncalc.description = ('Simple test of Fleur with two steps:'\n '1.Generate a starting density and run a single iteration '\n 'and test for ef, total energy. 
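Since every route in the urls.py above carries a name, views and templates can rebuild URLs with django.urls.reverse instead of hard-coding paths; a minimal illustration (assuming these urlpatterns are included at the project root of a configured Django project):

from django.urls import reverse

reverse('profile')  # -> '/'
reverse('songs')    # -> '/songs/'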
Uses: SOC, LOs')\ncalc.set_max_wallclock_seconds(5 * 60) # 5 min\n# Valid only for Slurm and PBS (using default values for the\n# number_cpus_per_machine), change for SGE-like schedulers\ncalc.set_resources({'num_machines': 1})\nif run_in_serial_mode:\n calc.set_withmpi(False)\n## Otherwise, to specify a given # of cpus per machine, uncomment the following:\n# calc.set_resources({\"num_machines\": 1, \"num_mpiprocs_per_machine\": 8})\n\n#calc.set_custom_scheduler_commands(\"#SBATCH --account=ch3\")\n\nif queue is not None:\n calc.set_queue_name(queue)\n\ncalc.use_fleurinpdata(fleurinp)\n#calc.use_code(code)\n\nif settings is not None:\n calc.use_settings(settings)\n\nif submit_test:\n subfolder, script_filename = calc.submit_test()\n print(\"Test_submit for calculation (uuid='{}')\".format(calc.uuid))\n print('Submit file in {}'.format(os.path.join(os.path.relpath(subfolder.abspath), script_filename)))\nelse:\n calc.store_all()\n print(\"created calculation; calc=Calculation(uuid='{}') # ID={}\".format(calc.uuid, calc.dbnode.pk))\n calc.submit()\n print(\"submitted calculation; calc=Calculation(uuid='{}') # ID={}\".format(calc.uuid, calc.dbnode.pk))\n","sub_path":"examples/submission/fleur_specific_tests/test_PTO_SOCXML_fleur.py","file_name":"test_PTO_SOCXML_fleur.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"314286033","text":"import collections\nimport time\n\nimport karateclub\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\nfrom scipy.spatial.distance import pdist, squareform\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.metrics import adjusted_rand_score\n\nfrom constants import TRIANGLES_COLORS\nfrom utils import toroid_dist_between_points, \\\n get_positions_velocity_headings, charge_labels_simulation\n\nglobal phi\nglobal alpha\nglobal gamma\nphi = 300\nalpha = 1\ngamma = 0.998\n\n\ndef nb_clusters(labels):\n return np.unique(labels).shape[0] - 1\n\n\ndef build_graph(positions, velocities, headings):\n g = nx.Graph()\n data = transform_data_for_graph(positions, velocities, headings)\n g.add_nodes_from(data)\n list_edges = calculate_edges_1(positions, velocities, headings)\n g.add_edges_from(list_edges)\n return g\n\n\ndef transform_data_for_graph(positions, velocities, headings):\n return [(i, dict(pos=tuple(pos), vel=tuple(vel), head=head))\n for i, pos, vel, head in\n zip(list(range(positions.shape[0])), positions.tolist(),\n velocities.tolist(), headings.tolist())]\n\n\ndef calculate_edges_1(positions, velocities, headings, epsilon=150):\n \"\"\"\n simple method for calculating edges with positions,\n velocities and headings to be used later\n \"\"\"\n list_edges = []\n for i in range(positions.shape[0]):\n for j in list(range(positions.shape[0])):\n if i != j:\n if toroid_dist_between_points(positions[i, :],\n positions[j, :]) < epsilon:\n list_edges.append((i, j))\n\n return list_edges\n\n\ndef connected_components_graph(graph):\n \"\"\"\n find connected components\n return list of graphs\n \"\"\"\n graphs_ = [graph.subgraph(c).copy() for c\n in nx.connected_components(graph)]\n\n # rename each subgraph, stock the renaming into a dictionary\n list_renaming = []\n list_new_graphs = []\n for graph in graphs_:\n renaming = dict()\n for i in range(len(graph.nodes)):\n nodes = list(graph.nodes)\n renaming[nodes[i]] = i\n\n list_new_graphs.append(nx.relabel.relabel_nodes(graph, renaming))\n list_renaming.append(renaming)\n\n return list_new_graphs, 
list_renaming\n\n\ndef label_prop(graph):\n \"\"\"\n label propagation algorithm\n \"\"\"\n model = karateclub.LabelPropagation()\n model.fit(graph)\n\n return model.get_memberships()\n\n\ndef modify_colors(triangles, color_list):\n \"\"\"\n assign color_list to triangles\n \"\"\"\n triangles.set_colors(color_list)\n\n\ndef membership_to_colorlist(membership):\n cluster_membership = [membership[node] for node in\n range(len(membership))]\n\n color_list = []\n for i in range(len(cluster_membership)):\n color_list.append(TRIANGLES_COLORS[cluster_membership[i]\n % len(TRIANGLES_COLORS)])\n return color_list\n\n\ndef graph_step(step, repository=\"simulation_data/\"):\n positions, velocities, headings = \\\n get_positions_velocity_headings(repository, step)\n\n graph = build_graph(positions, velocities, headings)\n\n if len(graph.edges) > 0:\n\n color_dict = dict()\n list_connected_comp, list_renaming = \\\n connected_components_graph(graph)\n\n for subgraph, renaming in zip(list_connected_comp,\n list_renaming):\n\n inv_ren = {v: k for k, v in renaming.items()}\n # if there is one edge or more\n if len(subgraph.edges) > 0:\n\n membership = label_prop(subgraph)\n color_list = membership_to_colorlist(membership)\n for i in range(len(membership)):\n color_dict[inv_ren[i]] = color_list[i]\n\n # else, imply one node alone\n else:\n color_dict[inv_ren[0]] = TRIANGLES_COLORS[0]\n\n color_dict = collections.OrderedDict(sorted(color_dict.items()))\n list_color = list(color_dict.values())\n return list_color\n\n\n############################utils\n\ndef merge_labels(old_labels, new_labels):\n n_old_labels = np.unique(old_labels).shape[0]\n\n for i in range(n_old_labels):\n\n old_indices_label_i = np.where(old_labels == i)[0]\n\n unique, counts = np.unique(new_labels[old_indices_label_i],\n return_counts=True)\n\n if counts.shape[0] > 0:\n arg_ind_max = np.argmax(counts)\n\n if counts[arg_ind_max] > old_indices_label_i.shape[0] // 2:\n\n to_replace = unique[arg_ind_max]\n\n if to_replace != i:\n # swap labels i and to_replace through a temporary\n # sentinel (-1 is never a valid label after the +1\n # shift); rewriting in two direct steps would collapse\n # both clusters onto to_replace\n new_labels = np.where(new_labels == to_replace,\n -1, new_labels)\n new_labels = np.where(new_labels == i,\n to_replace, new_labels)\n new_labels = np.where(new_labels == -1,\n i, new_labels)\n return new_labels\n\n\ndef stock_labels(labels, step, repository, filename):\n # print(\"data/\" + repository + filename + str(step) + \"saved\")\n np.savetxt(\"data/\" + repository + filename + str(step), labels)\n\n\ndef labels_to_colorlist(labels):\n \"\"\"\n assign a color to a label for each label in\n the labels list, return a list of colors\n \"\"\"\n color_list = []\n for i in range(len(labels)):\n color_list.append(TRIANGLES_COLORS[labels[i]])\n return color_list\n\n\n################################# DBSCAN\n\ndef DBscan_step_positions(step, old_labels, repository, eps=85, min_sample=2):\n \"\"\"\n DBSCAN algorithm on positions\n \"\"\"\n positions, velocities, headings = \\\n get_positions_velocity_headings(repository, step)\n # train_data = np.concatenate((positions, velocities), axis=1)\n train_data = positions\n start = time.time()\n db = DBSCAN(eps=eps, min_samples=min_sample).fit(train_data)\n labels = db.labels_ + 1 # for getting rid of -1 labels\n end = time.time()\n print(\"clustering done in: {0} seconds\".format(end - start))\n if old_labels is not None:\n labels = merge_labels(old_labels, labels)\n stock_labels(labels, step, repository=repository,\n filename=\"DBSCAN_positions_eps=\" + str(eps) + \"min_sample=\" + str(min_sample) + \"_label\")\n\n return labels\n\n\ndef test_DBSCAN_positions(steps, directory, list_eps, list_min_sample):\n name_pandas_file = 
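label_prop above delegates to karateclub's LabelPropagation, which requires nodes to be labeled 0..n-1; that is exactly why connected_components_graph returns the relabeling dictionaries. A standalone sketch on a stock graph whose nodes already satisfy that convention:

import networkx as nx
import karateclub

# Zachary's karate club graph has integer node ids 0..33, so it can be
# fed to the model without any relabeling step.
g = nx.karate_club_graph()
model = karateclub.LabelPropagation()
model.fit(g)
memberships = model.get_memberships()  # dict: node id -> community id
print(memberships[0], memberships[33])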
\"DBSCAN_position_table_results\"\n column_names = [\"eps\", \"min sample\", \"mean ARI score\"]\n\n # create empty dataframe\n results = pd.DataFrame(columns=column_names)\n\n for eps in list_eps:\n for min_sample in list_min_sample:\n\n # produce results for DBSCAN algorithm with these values of eps and min_sample\n\n old_labels = DBscan_step_positions(steps[0], None, directory, eps=eps, min_sample=min_sample)\n\n for step in steps[1:]:\n old_labels = DBscan_step_positions(step, old_labels, directory, eps=eps, min_sample=min_sample)\n\n # produce results\n filename_true = \"ground_truth_label\"\n filename_pred = \"DBSCAN_positions_eps=\" + str(eps) + \"min_sample=\" + str(min_sample) + \"_label\"\n\n score = calculate_rand_score(steps, directory, filename_true, filename_pred)\n results = results.append({\"eps\": eps, \"min sample\": min_sample, \"mean ARI score\": score}, ignore_index=True)\n print(results)\n\n # stock dataframe into a file\n results.to_csv(name_pandas_file + \".csv\", index=False)\n\n\ndef DBscan_step_positions_and_velocity(step, old_labels, repository,\n alpha=1,\n beta=27,\n eps=85,\n min_sample=2):\n \"\"\"\n DBSCAN algorithm on positions + beta * velocities\n \"\"\"\n positions, velocities, headings = \\\n get_positions_velocity_headings(repository, step)\n\n train_data = np.concatenate((alpha * positions, beta * velocities), axis=1)\n\n start = time.time()\n db = DBSCAN(eps=eps, min_samples=min_sample).fit(train_data)\n end = time.time()\n print(\"clustering done in: {0} seconds\".format(end - start))\n labels = db.labels_ + 1 # for getting rid of -1 labels\n if old_labels is not None:\n labels = merge_labels(old_labels, labels)\n stock_labels(labels, step, repository=repository,\n filename=\"DBSCAN_position|velocity_eps=\" + str(eps) + \"min_sample=\" + str(min_sample)\n + \"alpha=\" + str(alpha) + \"beta=\" + str(beta) + \"label\")\n\n return labels\n\n\ndef test_DBSCAN_positions_and_velocity(steps, directory, list_eps, list_min_sample, list_alpha, list_beta):\n name_pandas_file = \"DBSCAN_position_and_velocity_table_results\"\n column_names = [\"eps\", \"min sample\", \"alpha\", \"beta\", \"mean ARI score\"]\n\n # create empty dataframe\n results = pd.DataFrame(columns=column_names)\n\n for eps in list_eps:\n for min_sample in list_min_sample:\n for beta in list_beta:\n for alpha in list_alpha:\n\n # produce results for DBSCAN algorithm with these values of eps and min_sample\n old_labels = DBscan_step_positions_and_velocity(steps[0], None, directory, alpha=alpha,\n beta=beta,\n min_sample=min_sample,\n eps=eps)\n\n for step in steps[1:]:\n old_labels = DBscan_step_positions_and_velocity(step,\n old_labels,\n directory,\n alpha=alpha,\n beta=beta,\n min_sample=min_sample,\n eps=eps)\n\n # produce results\n filename_true = \"ground_truth_label\"\n filename_pred = \"DBSCAN_position|velocity_eps=\" + str(eps) + \"min_sample=\" + str(min_sample) \\\n + \"alpha=\" + str(alpha) + \"beta=\" + str(beta) + \"label\"\n\n score = calculate_rand_score(steps, directory, filename_true, filename_pred)\n results = results.append({\"eps\": eps, \"min sample\": min_sample, \"alpha\": alpha, \"beta\": beta,\n \"mean ARI score\": score}, ignore_index=True)\n print(results)\n # stock dataframe into a file\n results.to_csv(name_pandas_file + \".csv\", index=False)\n\n\ndef linear_comb_dist12(a1, a2):\n global phi, alpha\n return alpha * d1(a1, a2) + phi * d2(a1, a2)\n\n\ndef linear_comb_dist12_multiplestep(a1, a2, nb_step=3, gamma=0.5):\n # a1 and a2 are matrices with multiple 
timesteps data\r\n res = 0\r\n for i in range(0, nb_step):\r\n res = res + gamma ** (nb_step - i) * linear_comb_dist12(a1[i:i + 2], a2[i:i + 2])\r\n return res\r\n\r\n\r\ndef d1(a1, a2):\r\n res = np.linalg.norm(a1[:2] - a2[:2])\r\n return res\r\n\r\n\r\ndef d2(a1, a2):\r\n similarity = np.dot(a1[2:], a2[2:]) / (np.linalg.norm(a1[2:]) * np.linalg.norm(a2[2:]))\r\n angular_dist = np.abs(similarity - 1)\r\n return angular_dist\r\n\r\n\r\ndef linear_comb_dist12_multistep_precomputed(X, nb_step=3, gamma=0.5):\r\n global phi, alpha\r\n\r\n # NOTE: velocity blocks are compared with the euclidean d1 here,\r\n # unlike the angular d2 used in linear_comb_dist12\r\n list_linear_dist = np.array([alpha * pdist(X[:, i:i + 2], metric=d1)\r\n + phi * pdist(X[:, i + 2:i + 4], metric=d1) for i in\r\n range(0, (nb_step - 1) * 4 + 1, 4)])\r\n\r\n list_linear_dist_with_gamma = np.array([gamma ** (nb_step - i) * list_linear_dist[i] for i in range(0, nb_step)])\r\n\r\n final_result = np.sum(list_linear_dist_with_gamma, axis=0)\r\n\r\n return squareform(final_result)\r\n\r\n\r\ndef DBscan_step_intuition_dist_multistep_1(step, old_labels, repository, min_sample=2,\r\n eps=85, nb_step=5, phi_=300, alpha_=1, gamma_=0.998):\r\n \"\"\"\r\n DBSCAN with the Euclidean metric on data = (alpha * positions[i], phi * velocities[i]) concatenated for i = t - nb_step, ..., t - 1\r\n \"\"\"\r\n global phi, alpha, gamma\r\n\r\n phi = phi_\r\n alpha = alpha_\r\n gamma = gamma_\r\n\r\n positions, velocities, headings = \\\r\n get_positions_velocity_headings(repository, step - nb_step)\r\n\r\n train_data = np.concatenate((alpha * positions, phi * velocities), axis=1)\r\n\r\n # range(1, nb_step) so that every one of the nb_step - 1 remaining frames is weighted\r\n for k, i in zip(range(1, nb_step, 1), range(step - nb_step + 1, step, 1)):\r\n positions, velocities, headings = \\\r\n get_positions_velocity_headings(repository, i)\r\n\r\n train_data = np.concatenate(\r\n (train_data, gamma ** k * np.concatenate((alpha * positions, phi * velocities), axis=1)),\r\n axis=1)\r\n\r\n start = time.time()\r\n\r\n db = DBSCAN(eps=eps, min_samples=min_sample).fit(train_data)\r\n\r\n end = time.time()\r\n print(\"clustering done in: {0} seconds\".format(end - start))\r\n labels = db.labels_ + 1 # for getting rid of -1 labels\r\n\r\n if old_labels is not None:\r\n labels = merge_labels(old_labels, labels)\r\n stock_labels(labels, step, repository=repository,\r\n filename=\"DBSCAN_intuition_distmultisteps_phi=\" + str(phi) + \"_alpha=\"\r\n + str(alpha) + \"gamma=\" + str(gamma)\r\n + \"nb_step=\" + str(nb_step) + \"_label\")\r\n\r\n return labels\r\n\r\n\r\ndef test_DBSCAN_positions_and_velocity_multistep_1(steps, directory, list_alpha, list_phi, list_gamma, list_nb_steps):\r\n name_pandas_file = \"test_DBSCAN_positions_and_velocity_multistep_1_table_results\"\r\n column_names = [\"alpha\", \"phi\", \"gamma\", \"num steps\", \"mean ARI score\"]\r\n\r\n # create empty dataframe\r\n results = pd.DataFrame(columns=column_names)\r\n\r\n global phi\r\n global alpha\r\n global gamma\r\n\r\n eps = 85\r\n min_sample = 2\r\n for phi_ in list_phi:\r\n for alpha_ in list_alpha:\r\n for gamma_ in list_gamma:\r\n for nb_steps in list_nb_steps:\r\n phi = phi_\r\n alpha = alpha_\r\n gamma = gamma_\r\n\r\n # produce results for DBSCAN algorithm with these values of eps and min_sample\r\n old_labels = DBscan_step_intuition_dist_multistep_1(steps[0],\r\n None,\r\n directory,\r\n min_sample=min_sample,\r\n eps=eps,\r\n nb_step=nb_steps)\r\n for step in steps[1:]:\r\n old_labels = DBscan_step_intuition_dist_multistep_1(step,\r\n old_labels,\r\n directory,\r\n min_sample=min_sample,\r\n eps=eps,\r\n nb_step=nb_steps)\r\n\r\n # produce results\r\n filename_true = \"ground_truth_label\"\r\n filename_pred = \"DBSCAN_intuition_distmultisteps_phi=\" + str(phi) + \"_alpha=\" \\\r\n + str(alpha) + \"gamma=\" + str(gamma) \\\r\n + \"nb_step=\" + str(nb_steps) + \"_label\"\r\n\r\n score = calculate_rand_score(steps, directory, 
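The precomputed branch used by DBscan_step_intuition_dist_multistep below is typically much faster than handing DBSCAN a Python callable, because pdist vectorizes the pairwise work while a callable metric is invoked once per pair. A self-contained sketch of the pattern:

import numpy as np
from scipy.spatial.distance import pdist, squareform
from sklearn.cluster import DBSCAN

rng = np.random.default_rng(0)
X = rng.random((50, 2))

# Condensed pairwise distances -> square matrix -> DBSCAN consuming the
# matrix directly via metric='precomputed'.
D = squareform(pdist(X, metric="euclidean"))
labels = DBSCAN(eps=0.2, min_samples=3, metric="precomputed").fit(D).labels_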
filename_true, filename_pred)\r\n results = results.append({\"alpha\": alpha, \"phi\": phi, \"gamma\": gamma, \"num steps\": nb_steps,\r\n \"mean ARI score\": score}, ignore_index=True)\r\n print(results)\r\n # stock dataframe into a file\r\n results.to_csv(name_pandas_file + \".csv\", index=False)\r\n\r\n\r\ndef DBscan_step_intuition_dist_multistep(step, old_labels, repository, min_sample=2,\r\n eps=85, nb_step=3, gamma=0.5):\r\n \"\"\"\r\n DBSCAN with the multi-step linear_comb_dist12 metric on concatenated (positions, velocities) blocks\r\n \"\"\"\r\n global phi, alpha\r\n precomputed_ = True\r\n train_data = None\r\n\r\n for i in range(step, step + nb_step, 1):\r\n\r\n # load the data for timestep i (the loop variable, not the fixed step)\r\n positions, velocities, headings = \\\r\n get_positions_velocity_headings(repository, i)\r\n\r\n if train_data is not None:\r\n\r\n train_data = np.concatenate((train_data, np.concatenate((positions, velocities), axis=1)),\r\n axis=1)\r\n else:\r\n\r\n train_data = np.concatenate((positions, velocities), axis=1)\r\n\r\n start = time.time()\r\n\r\n if precomputed_:\r\n\r\n train_data = linear_comb_dist12_multistep_precomputed(train_data)\r\n db = DBSCAN(eps=eps, min_samples=min_sample, metric='precomputed').fit(train_data)\r\n\r\n else:\r\n db = DBSCAN(eps=eps, min_samples=min_sample, metric=linear_comb_dist12_multiplestep).fit(train_data)\r\n\r\n end = time.time()\r\n print(\"clustering done in: {0} seconds\".format(end - start))\r\n labels = db.labels_ + 1 # for getting rid of -1 labels\r\n\r\n if old_labels is not None:\r\n labels = merge_labels(old_labels, labels)\r\n stock_labels(labels, step, repository=repository,\r\n filename=\"DBSCAN_intuition_distmultisteps_phi=\" + str(phi) + \"_alpha=\"\r\n + str(alpha) + \"gamma=\" + str(gamma) + \"_label\")\r\n\r\n return labels\r\n\r\n\r\ndef DBscan_step_intuition_dist(step, old_labels, repository,\r\n min_sample=2, eps=85, phi_=10, alpha_=1):\r\n \"\"\"\r\n DBSCAN with the custom linear_comb_dist12 metric (alpha * positional distance + phi * angular velocity distance)\r\n \"\"\"\r\n global phi, alpha\r\n\r\n phi = phi_\r\n alpha = alpha_\r\n\r\n positions, velocities, headings = \\\r\n get_positions_velocity_headings(repository, step)\r\n\r\n train_data = np.concatenate((positions, velocities), axis=1)\r\n start = time.time()\r\n\r\n db = DBSCAN(eps=eps, min_samples=min_sample, metric=linear_comb_dist12).fit(train_data)\r\n\r\n end = time.time()\r\n print(\"clustering done in: {0} seconds\".format(end - start))\r\n\r\n labels = db.labels_ + 1 # for getting rid of -1 labels\r\n if old_labels is not None:\r\n labels = merge_labels(old_labels, labels)\r\n stock_labels(labels, step, repository=repository,\r\n filename=\"DBSCAN_intuition_dist_phi=\" + str(phi) + \"_alpha=\" + str(alpha) + \"_label\")\r\n return labels\r\n\r\n\r\ndef test_DBSCAN_new_metric_positions_and_velocity(steps, directory, list_alpha, list_phi):\r\n name_pandas_file = \"DBSCAN_new_metric_position_and_velocity_table_results\"\r\n column_names = [\"alpha\", \"phi\", \"mean ARI score\"]\r\n\r\n # create empty dataframe\r\n results = pd.DataFrame(columns=column_names)\r\n\r\n global phi\r\n global alpha\r\n\r\n eps = 85\r\n min_sample = 2\r\n for phi_ in list_phi:\r\n for alpha_ in list_alpha:\r\n\r\n phi = phi_\r\n alpha = alpha_\r\n\r\n # produce results for DBSCAN algorithm with these values of eps and min_sample\r\n old_labels = DBscan_step_intuition_dist(steps[0], None, directory,\r\n min_sample=min_sample,\r\n eps=eps)\r\n\r\n for step in steps[1:]:\r\n old_labels = DBscan_step_intuition_dist(step,\r\n old_labels,\r\n directory,\r\n min_sample=min_sample,\r\n eps=eps)\r\n\r\n # produce results\r\n filename_true = \"ground_truth_label\"\r\n filename_pred = \"DBSCAN_intuition_dist_phi=\" + str(phi) + \"_alpha=\" + str(alpha) + \"_label\"\r\n\r\n score = calculate_rand_score(steps, directory, filename_true, filename_pred)\r\n results = 
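merge_labels above keeps cluster ids stable across frames by swapping each new label with the old label whose members it mostly covers (the swap is routed through a temporary sentinel). A worked example, with values chosen purely for illustration:

import numpy as np

old = np.array([0, 0, 0, 1, 1])
new = np.array([2, 2, 2, 0, 0])
# Old cluster 0 is now mostly labeled 2, so labels 2 and 0 are swapped;
# old cluster 1 then maps back onto label 1 the same way.
print(merge_labels(old, new))  # expected: [0 0 0 1 1]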
results.append({\"alpha\": alpha, \"phi\": phi,\n \"mean ARI score\": score}, ignore_index=True)\n\n # stock dataframe into a file\n res = results.sort_values([\"mean ARI score\"], ascending=False)\n print(res.head(10))\n results.to_csv(name_pandas_file + \".csv\", index=False)\n\n\ndef build_ground_truth(step, old_labels, repository, list_nb_boids,\n beta=23,\n eps=75,\n min_sample=2):\n \"\"\"\n build ground truth with DBscan on positions\n \"\"\"\n positions, velocities, headings = \\\n get_positions_velocity_headings(repository, step)\n\n labels = np.zeros(positions.shape[0], dtype=int)\n\n sum_boids = 0\n\n for nb_boids in list_nb_boids:\n\n indices = np.arange(sum_boids, sum_boids + nb_boids)\n\n sum_boids = sum_boids + nb_boids\n \"\"\"\n train_data = np.concatenate((positions[indices],\n beta * velocities[indices]),\n axis=1)\n \"\"\"\n train_data = positions[indices]\n\n db = DBSCAN(eps=eps, min_samples=min_sample).fit(train_data)\n\n for ind_0, ind in zip(np.arange(0, nb_boids), indices):\n # we keep the zeros, we apply + 50 to the other labels\n labels[ind] = np.where(db.labels_[ind_0] > -1,\n db.labels_[ind_0] + sum_boids, 0)\n # for getting rid of -1 labels\n # we apply db.labels + sum_boids to differentiate clusters\n # from different species\n\n if old_labels is not None:\n labels[indices] = merge_labels(old_labels[indices],\n labels[indices])\n\n stock_labels(labels, step, repository=repository,\n filename=\"ground_truth_label\")\n\n return labels\n\n\ndef calculate_rand_score(steps, repository, filename_true, filename_pred):\n \"\"\"\n Compute Rand index at each step specified\n steps: list of step to compute\n repository: repository where the labels are\n filename_1: name of files for first labels\n filename_2: name of files for second labels\n output_file: file with Rand index scores for each steps\n \"\"\"\n # for all files\n list_scores = list()\n for step in steps:\n labels_true = charge_labels_simulation(repository, filename_true, step)\n labels_pred = charge_labels_simulation(repository, filename_pred, step)\n\n score = adjusted_rand_score(labels_true, labels_pred)\n list_scores.append(score)\n\n scores = np.array(list_scores)\n # save scores into a file\n filename = \"scores/\" + \"_ARIscore_\" + filename_pred + filename_true\n print(\"mean ARI score:\", np.mean(scores))\n np.savetxt(filename, scores)\n return np.mean(scores)\n\n\ndef calculate_stability(old_labels, new_labels):\n \"\"\"\n compare new labels and old labels applying the formula (100 - %change of population in clusters)/100\n \"\"\"\n result = None\n # get information about each clusters of old labels\n old_labels_, old_labels_indices = np.unique(old_labels)\n new_labels_, new_labels_indices = np.unique(new_labels)\n return result\n\n\nfrom convex import graham_scan\n\n\ndef calculate_convex_envelope(positions, labels_clusters):\n \"\"\"\n return a list of convex envelope which is a dictionary of indices\n \"\"\"\n dic = dict()\n for i in np.unique(labels_clusters):\n if i == 0:\n pass\n else:\n dic[i] = []\n indices_i = np.where(labels_clusters == i)[0]\n pos = positions[indices_i, :]\n hull = graham_scan(pos, False)\n dic[i].append(hull)\n print(hull)\n\n return dic\n\n\nif __name__ == \"__main__\":\n # steps to test the clustering algorithm\n steps = list(np.arange(1000, 1100))\n\n # directory where the results will be stored.\n directory = \"simulation_data_200_Boids/\"\n \"\"\"\n list_eps = [65, 70, 75, 80, 85, 90]\n list_min_sample = [2, 3, 4, 5]\n\n # run test for DBSCAN using positions\n 
test_DBSCAN_positions(directory=directory,\n steps=steps,\n list_eps=list_eps,\n list_min_sample=list_min_sample)\n\n list_alpha = [0.8, 1, 1.2]\n list_beta = [10, 15, 20, 25, 30, 35, 40]\n list_eps = [85]\n list_min_sample = [2]\n\n # run test for DBSCAN on positions and velocities\n test_DBSCAN_positions_and_velocity(directory=directory,\n steps=steps,\n list_alpha=list_alpha,\n list_beta=list_beta,\n list_eps=list_eps,\n list_min_sample=list_min_sample)\n \n list_phi = [50, 100, 150, 200]\n list_alpha = [0.8, 1, 1.2]\n\n test_DBSCAN_new_metric_positions_and_velocity(directory=directory,\n steps=steps,\n list_alpha=list_alpha,\n list_phi=list_phi)\n \"\"\"\n list_alpha = [0.4, 0.6, 0.8, 1, 1.2, 1.4]\n list_phi = [10, 25, 40]\n list_gamma = [0.5, 0.8, 0.9, 0.95]\n list_nb_steps = [3, 4, 5]\n test_DBSCAN_positions_and_velocity_multistep_1(steps, directory, list_alpha, list_phi, list_gamma, list_nb_steps)\n\n","sub_path":"ML.py","file_name":"ML.py","file_ext":"py","file_size_in_byte":25558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}